code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
def __lowerCamelCase ( A__ , A__ ) -> list:
"""simple docstring"""
UpperCamelCase = len(A__ )
UpperCamelCase = []
for i in range(len(A__ ) - pat_len + 1 ):
UpperCamelCase = True
for j in range(A__ ):
if s[i + j] != pattern[j]:
UpperCamelCase = False
break
if match_found:
position.append(A__ )
return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 28 | '''simple docstring'''
def UpperCamelCase_ ( _UpperCAmelCase : list ) -> list:
"""simple docstring"""
_UpperCAmelCase : List[Any] = len(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
_UpperCAmelCase , _UpperCAmelCase : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = list(range(10, 0, -1))
print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
| 31 | 0 |
from itertools import product
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = sides_number
__a = max_face_number * dice_number
__a = [0] * (max_total + 1)
__a = 1
__a = range(__lowerCamelCase , max_face_number + 1 )
for dice_numbers in product(__lowerCamelCase , repeat=__lowerCamelCase ):
__a = sum(__lowerCamelCase )
totals_frequencies[total] += 1
return totals_frequencies
def __snake_case ( ):
__a = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__a = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__a = 0
__a = 9
__a = 4 * 9
__a = 6
for peter_total in range(__lowerCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__a = (4**9) * (6**6)
__a = peter_wins_count / total_games_number
__a = round(__lowerCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
| 351 |
def __snake_case ( _UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError('''Input value must be an \'int\' type''' )
__a = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 131 | 0 |
'''simple docstring'''
from functools import reduce
__lowerCamelCase = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def UpperCAmelCase__ ( UpperCAmelCase__ = N ) -> Tuple:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda UpperCAmelCase__, UpperCAmelCase__ : str(int(__snake_case ) * int(__snake_case ) ), n[i : i + 13] ) )
for i in range(len(__snake_case ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 162 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__A : List[str] = '''examples/'''
__A : int = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__A : Dict = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__A : Optional[int] = '''README.md'''
def lowercase ( __snake_case : int , __snake_case : Any , __snake_case : int ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase_ : int = f.read()
lowercase_ , lowercase_ : List[str] = REPLACE_PATTERNS[pattern]
lowercase_ : Union[str, Any] = replace.replace('''VERSION''' , __snake_case )
lowercase_ : Optional[Any] = re_pattern.sub(__snake_case , __snake_case )
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__snake_case )
def lowercase ( __snake_case : int ):
for folder, directories, fnames in os.walk(__snake_case ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__snake_case , __snake_case ) , __snake_case , pattern='''examples''' )
def lowercase ( __snake_case : Optional[Any] , __snake_case : Optional[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__snake_case , __snake_case , __snake_case )
if not patch:
update_version_in_examples(__snake_case )
def lowercase ( ):
lowercase_ : Union[str, Any] = '''🤗 Transformers currently provides the following architectures'''
lowercase_ : Union[str, Any] = '''1. Want to contribute a new model?'''
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase_ : List[str] = f.readlines()
# Find the start of the list.
lowercase_ : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowercase_ : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
lowercase_ : str = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__snake_case )
def lowercase ( ):
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
lowercase_ : List[Any] = f.read()
lowercase_ : List[str] = REPLACE_PATTERNS['''init'''][0].search(__snake_case ).groups()[0]
return packaging.version.parse(__snake_case )
def lowercase ( __snake_case : Optional[Any]=False ):
lowercase_ : str = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
lowercase_ : Optional[Any] = default_version.base_version
elif patch:
lowercase_ : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowercase_ : Optional[int] = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowercase_ : int = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__snake_case ) == 0:
lowercase_ : Dict = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case , patch=__snake_case )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowercase ( ):
lowercase_ : List[Any] = get_version()
lowercase_ : List[str] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowercase_ : Any = current_version.base_version
# Check with the user we got that right.
lowercase_ : Tuple = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__snake_case ) == 0:
lowercase_ : str = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__A : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 33 | 0 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
A_ : Tuple =logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( snake_case : Tuple , snake_case : Dict )-> str:
# save results
if os.path.exists(snake_case ):
if os.path.exists(os.path.join(snake_case , 'config.json' ) ) and os.path.isfile(
os.path.join(snake_case , 'config.json' ) ):
os.remove(os.path.join(snake_case , 'config.json' ) )
if os.path.exists(os.path.join(snake_case , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(snake_case , 'pytorch_model.bin' ) ):
os.remove(os.path.join(snake_case , 'pytorch_model.bin' ) )
else:
os.makedirs(snake_case )
model.save_pretrained(snake_case )
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[Any] , snake_case : int=False )-> List[str]:
_lowerCamelCase = 2
if unlogit:
_lowerCamelCase = torch.pow(snake_case , snake_case )
_lowerCamelCase = p * torch.log(snake_case )
_lowerCamelCase = 0
return -plogp.sum(dim=-1 )
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[int] )-> Optional[int]:
logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(snake_case ) ) ) )
for row in range(len(snake_case ) ):
if tensor.dtype != torch.long:
logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data ) )
def SCREAMING_SNAKE_CASE_ ( snake_case : List[str] , snake_case : Any , snake_case : Optional[int] , snake_case : int=True , snake_case : Optional[int]=True , snake_case : Tuple=None , snake_case : Tuple=False )-> int:
_lowerCamelCase , _lowerCamelCase = model.config.num_hidden_layers, model.config.num_attention_heads
_lowerCamelCase = torch.zeros(snake_case , snake_case ).to(args.device )
_lowerCamelCase = torch.zeros(snake_case , snake_case ).to(args.device )
if head_mask is None:
_lowerCamelCase = torch.ones(snake_case , snake_case ).to(args.device )
head_mask.requires_grad_(requires_grad=snake_case )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_lowerCamelCase = None
_lowerCamelCase = 0.0
_lowerCamelCase = 0.0
for step, inputs in enumerate(tqdm(snake_case , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_lowerCamelCase = tuple(t.to(args.device ) for t in inputs )
((_lowerCamelCase) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_lowerCamelCase = model(snake_case , labels=snake_case , head_mask=snake_case )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(snake_case ):
_lowerCamelCase = entropy(attn.detach() , snake_case )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(snake_case ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_lowerCamelCase = 2
_lowerCamelCase = torch.pow(torch.pow(snake_case , snake_case ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
_lowerCamelCase = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(snake_case )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(snake_case )
logger.info('Head ranked by importance scores' )
_lowerCamelCase = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_lowerCamelCase = torch.arange(
head_importance.numel() , device=args.device )
_lowerCamelCase = head_ranks.view_as(snake_case )
print_ad_tensor(snake_case )
return attn_entropy, head_importance, total_loss
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[int] , snake_case : List[str] , snake_case : int )-> Dict:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = compute_heads_importance(snake_case , snake_case , snake_case , compute_entropy=snake_case )
_lowerCamelCase = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , snake_case , original_score * args.masking_threshold )
_lowerCamelCase = torch.ones_like(snake_case )
_lowerCamelCase = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_lowerCamelCase = original_score
while current_score >= original_score * args.masking_threshold:
_lowerCamelCase = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_lowerCamelCase = float('Inf' )
_lowerCamelCase = head_importance.view(-1 ).sort()[1]
if len(snake_case ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_lowerCamelCase = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_lowerCamelCase = new_head_mask.view(-1 )
_lowerCamelCase = 0.0
_lowerCamelCase = new_head_mask.view_as(snake_case )
_lowerCamelCase = new_head_mask.clone().detach()
print_ad_tensor(snake_case )
# Compute metric and head importance again
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = compute_heads_importance(
snake_case , snake_case , snake_case , compute_entropy=snake_case , head_mask=snake_case )
_lowerCamelCase = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , snake_case , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('Final head mask' )
print_ad_tensor(snake_case )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[int] , snake_case : Tuple , snake_case : Tuple , snake_case : Optional[Any] )-> Optional[Any]:
_lowerCamelCase = datetime.now()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = compute_heads_importance(
snake_case , snake_case , snake_case , compute_entropy=snake_case , compute_importance=snake_case , head_mask=snake_case )
_lowerCamelCase = 1 / loss
_lowerCamelCase = datetime.now() - before_time
_lowerCamelCase = sum(p.numel() for p in model.parameters() )
_lowerCamelCase = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(snake_case ) )
}
for k, v in heads_to_prune.items():
if isinstance(snake_case , snake_case ):
_lowerCamelCase = [
v,
]
assert sum(len(snake_case ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(snake_case )
_lowerCamelCase = sum(p.numel() for p in model.parameters() )
_lowerCamelCase = datetime.now()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = compute_heads_importance(
snake_case , snake_case , snake_case , compute_entropy=snake_case , compute_importance=snake_case , head_mask=snake_case , actually_pruned=snake_case , )
_lowerCamelCase = 1 / loss
_lowerCamelCase = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , snake_case , snake_case , pruned_num_params / original_num_params * 100 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , snake_case , snake_case )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 )
save_model(snake_case , args.output_dir )
def SCREAMING_SNAKE_CASE_ ( )-> Union[str, Any]:
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=snake_case , type=snake_case , required=snake_case , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=snake_case , type=snake_case , required=snake_case , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=snake_case , type=snake_case , required=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=snake_case , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=snake_case , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=snake_case , type=snake_case , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=snake_case , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=snake_case , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=snake_case , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=snake_case , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=snake_case , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=snake_case , help='Batch size.' )
parser.add_argument('--seed' , type=snake_case , default=42 )
parser.add_argument('--local_rank' , type=snake_case , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=snake_case , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=snake_case , default='' , help='Can be used for distant debugging.' )
_lowerCamelCase = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_lowerCamelCase = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_lowerCamelCase = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_lowerCamelCase = torch.device('cuda' , args.local_rank )
_lowerCamelCase = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_lowerCamelCase = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_lowerCamelCase = nn.parallel.DistributedDataParallel(
snake_case , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=snake_case )
elif args.n_gpu > 1:
_lowerCamelCase = nn.DataParallel(snake_case )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=snake_case )
torch.save(snake_case , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , snake_case )
# Prepare dataset
_lowerCamelCase = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_lowerCamelCase = (torch.from_numpy(snake_case ),)
_lowerCamelCase = TensorDataset(*snake_case )
_lowerCamelCase = RandomSampler(snake_case )
_lowerCamelCase = DataLoader(snake_case , sampler=snake_case , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(snake_case , snake_case , snake_case )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_lowerCamelCase = mask_heads(snake_case , snake_case , snake_case )
prune_heads(snake_case , snake_case , snake_case , snake_case )
if __name__ == "__main__":
main()
| 80 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : list )-> list:
def merge(snake_case : list , snake_case : list ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(snake_case ) <= 1:
return collection
_lowerCamelCase = len(snake_case ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : int =input("""Enter numbers separated by a comma:\n""").strip()
A_ : Dict =[int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 80 | 1 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
A__: List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
A__: Optional[int] = parser.parse_args()
A__: Dict = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 276 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Tuple = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "roc_bert"
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :Tuple=3_0_5_2_2 , SCREAMING_SNAKE_CASE :List[str]=7_6_8 , SCREAMING_SNAKE_CASE :Dict=1_2 , SCREAMING_SNAKE_CASE :List[str]=1_2 , SCREAMING_SNAKE_CASE :Tuple=3_0_7_2 , SCREAMING_SNAKE_CASE :List[Any]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :int=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE :Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Optional[int]="absolute" , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :Optional[Any]=9_1_0 , SCREAMING_SNAKE_CASE :Union[str, Any]=5_1_2 , SCREAMING_SNAKE_CASE :str=2_4_8_5_8 , SCREAMING_SNAKE_CASE :List[Any]=True , **SCREAMING_SNAKE_CASE :Tuple , ) -> Optional[int]:
'''simple docstring'''
_a : List[str] =vocab_size
_a : List[str] =max_position_embeddings
_a : Optional[Any] =hidden_size
_a : List[Any] =num_hidden_layers
_a : List[str] =num_attention_heads
_a : int =intermediate_size
_a : Any =hidden_act
_a : Dict =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : str =initializer_range
_a : Optional[int] =type_vocab_size
_a : Any =layer_norm_eps
_a : Any =use_cache
_a : Optional[int] =enable_pronunciation
_a : Optional[Any] =enable_shape
_a : Optional[Any] =pronunciation_embed_dim
_a : Tuple =pronunciation_vocab_size
_a : Union[str, Any] =shape_embed_dim
_a : Any =shape_vocab_size
_a : Tuple =concat_input
_a : List[str] =position_embedding_type
_a : List[str] =classifier_dropout
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 276 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a_ :
def __init__( self : Any , lowercase : Dict , lowercase : Union[str, Any]=13 , lowercase : Union[str, Any]=7 , lowercase : List[str]=False , lowercase : Tuple=True , lowercase : int=False , lowercase : str=False , lowercase : str=19 , lowercase : Union[str, Any]=32 , lowercase : int=5 , lowercase : Dict=4 , lowercase : List[str]=37 , lowercase : Dict="gelu" , lowercase : Optional[int]=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : Optional[Any]=16 , lowercase : Dict=2 , lowercase : str=0.02 , lowercase : Dict=3 , lowercase : str=4 , lowercase : Dict=None , ):
"""simple docstring"""
lowercase_ :Union[str, Any] = parent
lowercase_ :int = batch_size
lowercase_ :List[Any] = seq_length
lowercase_ :str = is_training
lowercase_ :List[Any] = use_input_mask
lowercase_ :Dict = use_token_type_ids
lowercase_ :Any = use_labels
lowercase_ :List[Any] = vocab_size
lowercase_ :Optional[Any] = hidden_size
lowercase_ :Optional[int] = num_hidden_layers
lowercase_ :Dict = num_attention_heads
lowercase_ :Dict = intermediate_size
lowercase_ :Any = hidden_act
lowercase_ :Optional[int] = hidden_dropout_prob
lowercase_ :List[str] = attention_probs_dropout_prob
lowercase_ :List[Any] = max_position_embeddings
lowercase_ :List[Any] = type_vocab_size
lowercase_ :Optional[Any] = type_sequence_label_size
lowercase_ :Tuple = initializer_range
lowercase_ :Any = num_labels
lowercase_ :str = num_choices
lowercase_ :List[str] = scope
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :str = None
if self.use_input_mask:
lowercase_ :Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ :Dict = None
lowercase_ :str = None
lowercase_ :Tuple = None
if self.use_labels:
lowercase_ :Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ :Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ :Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ :int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :Dict = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowercase , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
return config
def lowercase__ ( self : List[Any] , lowercase : Any , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : List[str] , lowercase : Any , lowercase : List[str] ):
"""simple docstring"""
lowercase_ :Optional[Any] = EsmForProteinFolding(config=lowercase ).float()
model.to(lowercase )
model.eval()
lowercase_ :Tuple = model(lowercase , attention_mask=lowercase )
lowercase_ :Tuple = model(lowercase )
lowercase_ :Optional[int] = model(lowercase )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :str = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) :str = config_and_inputs
lowercase_ :Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a_ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__A = False
__A = (EsmForProteinFolding,) if is_torch_available() else ()
__A = ()
__A = {} if is_torch_available() else {}
__A = False
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :int = EsmFoldModelTester(self )
lowercase_ :Dict = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def lowercase__ ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : int ):
"""simple docstring"""
lowercase_ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
@unittest.skip("Does not support attention outputs" )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip
def lowercase__ ( self : int ):
"""simple docstring"""
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def lowercase__ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowercase__ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowercase__ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowercase__ ( self : int ):
"""simple docstring"""
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip("ESMFold only has one output format." )
def lowercase__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip("ESMFold does not support input chunking." )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def lowercase__ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowercase__ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowercase__ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowercase__ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def lowercase__ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : Tuple ):
"""simple docstring"""
pass
@require_torch
class a_ ( _lowerCAmelCase ):
@slow
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Optional[int] = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float()
model.eval()
lowercase_ :List[str] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase_ :Union[str, Any] = model(lowercase )["positions"]
lowercase_ :Any = torch.tensor([2.58_28, 0.79_93, -10.93_34] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowercase , atol=1e-4 ) )
| 147 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
class a_ ( _lowerCAmelCase ):
__A = ["input_features"]
def __init__( self : Any , lowercase : Tuple=80 , lowercase : Optional[int]=16_000 , lowercase : Optional[Any]=160 , lowercase : Optional[int]=30 , lowercase : List[Any]=400 , lowercase : Dict=0.0 , lowercase : Tuple=False , **lowercase : Optional[int] , ):
"""simple docstring"""
super().__init__(
feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , return_attention_mask=lowercase , **lowercase , )
lowercase_ :Optional[int] = n_fft
lowercase_ :List[Any] = hop_length
lowercase_ :Tuple = chunk_length
lowercase_ :List[str] = chunk_length * sampling_rate
lowercase_ :Optional[Any] = self.n_samples // hop_length
lowercase_ :Any = sampling_rate
lowercase_ :List[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowercase , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=lowercase , norm="slaney" , mel_scale="slaney" , )
def lowercase__ ( self : str , lowercase : np.array ):
"""simple docstring"""
lowercase_ :Any = spectrogram(
lowercase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
lowercase_ :Any = log_spec[:, :-1]
lowercase_ :List[Any] = np.maximum(lowercase , log_spec.max() - 8.0 )
lowercase_ :Dict = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowercase__ ( lowercase : List[np.ndarray] , lowercase : List[np.ndarray] , lowercase : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
lowercase_ :Optional[int] = np.array(lowercase , np.intaa )
lowercase_ :Any = []
for vector, length in zip(lowercase , attention_mask.sum(-1 ) ):
lowercase_ :Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowercase_ :List[Any] = padding_value
normed_input_values.append(lowercase )
else:
lowercase_ :List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : Tuple , lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase : bool = True , lowercase : Optional[int] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Optional[bool] = None , lowercase : Optional[str] = "max_length" , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : Optional[bool] = None , **lowercase : Union[str, Any] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase_ :List[str] = isinstance(lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowercase_ :Optional[Any] = is_batched_numpy or (
isinstance(lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase_ :Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowercase , np.ndarray ):
lowercase_ :List[Any] = np.asarray(lowercase , dtype=np.floataa )
elif isinstance(lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase_ :Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ :Optional[int] = [np.asarray([raw_speech] ).T]
lowercase_ :int = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
lowercase_ :Tuple = self.pad(
lowercase , padding=lowercase , max_length=max_length if max_length else self.n_samples , truncation=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase_ :Union[str, Any] = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
lowercase_ :List[Any] = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
lowercase_ :Union[str, Any] = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
lowercase_ :List[str] = [self._np_extract_fbank_features(lowercase ) for waveform in input_features[0]]
if isinstance(input_features[0] , lowercase ):
lowercase_ :Tuple = [np.asarray(lowercase , dtype=np.floataa ) for feature in input_features]
else:
lowercase_ :Union[str, Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase_ :Dict = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
lowercase_ :Tuple = padded_inputs.convert_to_tensors(lowercase )
return padded_inputs
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :Union[str, Any] = copy.deepcopy(self.__dict__ )
lowercase_ :List[str] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 147 | 1 |
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase ):
# Check if the input is valid
if not len(__lowerCamelCase ) == len(__lowerCamelCase ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = equationa
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = equationa
# Calculate the determinants of the matrices
UpperCAmelCase_ : Any = aa * ba - aa * ba
UpperCAmelCase_ : Optional[int] = ca * ba - ca * ba
UpperCAmelCase_ : int = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
UpperCAmelCase_ : List[str] = determinant_x / determinant
UpperCAmelCase_ : Optional[int] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 61 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
UpperCAmelCase : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "sequence-classification"
def __init__( self : Optional[Any] , lowerCAmelCase_ : int):
"""simple docstring"""
if type(lowerCAmelCase_) == dict:
lowercase_ = Namespace(**lowerCAmelCase_)
lowercase_ = glue_output_modes[hparams.task]
lowercase_ = glue_tasks_num_labels[hparams.task]
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ , self.mode)
def _UpperCAmelCase ( self : Optional[int] , **lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
return self.model(**lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowercase_ = self(**lowerCAmelCase_)
lowercase_ = outputs[0]
lowercase_ = self.trainer.lr_schedulers[0]["""scheduler"""]
lowercase_ = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.hparams
lowercase_ = processors[args.task]()
lowercase_ = processor.get_labels()
for mode in ["train", "dev"]:
lowercase_ = self._feature_file(lowerCAmelCase_)
if os.path.exists(lowerCAmelCase_) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , lowerCAmelCase_)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
lowercase_ = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
lowercase_ = convert_examples_to_features(
lowerCAmelCase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , lowerCAmelCase_)
torch.save(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : bool = False):
"""simple docstring"""
lowercase_ = """dev""" if mode == """test""" else mode
lowercase_ = self._feature_file(lowerCAmelCase_)
logger.info("""Loading features from cached file %s""" , lowerCAmelCase_)
lowercase_ = torch.load(lowerCAmelCase_)
lowercase_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowercase_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowercase_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowercase_ = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowercase_ = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) , batch_size=lowerCAmelCase_ , shuffle=lowerCAmelCase_ , )
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowercase_ = self(**lowerCAmelCase_)
lowercase_ , lowercase_ = outputs[:2]
lowercase_ = logits.detach().cpu().numpy()
lowercase_ = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
lowercase_ = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowercase_ = np.argmax(lowerCAmelCase_ , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowercase_ = np.squeeze(lowerCAmelCase_)
lowercase_ = np.concatenate([x["""target"""] for x in outputs] , axis=0)
lowercase_ = [[] for _ in range(out_label_ids.shape[0])]
lowercase_ = [[] for _ in range(out_label_ids.shape[0])]
lowercase_ = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , lowerCAmelCase_ , lowerCAmelCase_)}
lowercase_ = dict(results.items())
lowercase_ = results
return ret, preds_list, out_label_list
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : list):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._eval_end(lowerCAmelCase_)
lowercase_ = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ = self._eval_end(lowerCAmelCase_)
lowercase_ = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowerCAmelCase_ , lowerCAmelCase_)
parser.add_argument(
"""--max_seq_length""" , default=1_2_8 , type=lowerCAmelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=lowerCAmelCase_ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def _SCREAMING_SNAKE_CASE () -> str:
'''simple docstring'''
lowercase_ = argparse.ArgumentParser()
add_generic_args(__lowerCAmelCase , os.getcwd() )
lowercase_ = GLUETransformer.add_model_specific_args(__lowerCAmelCase , os.getcwd() )
lowercase_ = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowercase_ = os.path.join(
"""./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
lowercase_ = GLUETransformer(__lowerCAmelCase )
lowercase_ = generic_train(__lowerCAmelCase , __lowerCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowercase_ = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=__lowerCAmelCase ) )
lowercase_ = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowerCAmelCase )
if __name__ == "__main__":
main()
| 136 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
if openai_config_file == "":
_UpperCamelCase = OpenAIGPTConfig()
else:
_UpperCamelCase = OpenAIGPTConfig.from_json_file(a__ )
_UpperCamelCase = OpenAIGPTModel(a__ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(a__ , a__ , a__ )
# Save pytorch-model
_UpperCamelCase = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
_UpperCamelCase = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , a__ )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
lowerCamelCase__ = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 366 | import re
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
if len(re.findall("[ATCG]" , a__ ) ) != len(a__ ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __snake_case ( UpperCamelCase_ ):
def UpperCAmelCase__ ( self : List[Any] , A_ : str):
with open(A_ , encoding='''utf-8''') as input_file:
lowerCAmelCase_ : List[Any] = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''')
lowerCAmelCase_ : Any = input_file.read()
lowerCAmelCase_ : int = regexp.search(A_)
return match
def UpperCAmelCase__ ( self : Optional[int] , A_ : str):
with open(A_ , encoding='''utf-8''') as input_file:
lowerCAmelCase_ : int = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL)
lowerCAmelCase_ : Dict = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
lowerCAmelCase_ : Optional[Any] = regexp.finditer(A_)
lowerCAmelCase_ : str = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : int = Path('''./datasets''')
lowerCAmelCase_ : str = list(dataset_paths.absolute().glob('''**/*.py'''))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(A_)):
raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""")
def UpperCAmelCase__ ( self : Dict):
lowerCAmelCase_ : Tuple = Path('''./datasets''')
lowerCAmelCase_ : Union[str, Any] = list(dataset_paths.absolute().glob('''**/*.py'''))
for dataset in dataset_files:
if self._no_print_statements(str(A_)):
raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
| 103 | """simple docstring"""
from __future__ import annotations
from typing import Any
class _A :
def __init__( self , __lowerCAmelCase = 6 ):
"""simple docstring"""
lowercase = None
lowercase = None
self.create_linked_list(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = Node()
lowercase = current_node
lowercase = current_node
lowercase = current_node
for _ in range(1 , __lowerCAmelCase ):
lowercase = Node()
lowercase = current_node
lowercase = previous_node
lowercase = current_node
lowercase = self.front
lowercase = previous_node
def A__ ( self ):
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def A__ ( self ):
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowercase = self.rear.next
if self.rear:
lowercase = data
def A__ ( self ):
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowercase = self.front.data
lowercase = None
return data
lowercase = self.front
lowercase = old_front.next
lowercase = old_front.data
lowercase = None
return data
def A__ ( self ):
"""simple docstring"""
if self.is_empty():
raise Exception("""Empty Queue""" )
def A__ ( self ):
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = None
lowercase = None
lowercase = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 197 | 0 |
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list ):
SCREAMING_SNAKE_CASE : Union[str, Any] = set_counts
SCREAMING_SNAKE_CASE : Any = max(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [1] * num_sets
SCREAMING_SNAKE_CASE : List[str] = list(range(UpperCAmelCase_ ) )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : List[Any] = self.get_parent(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_parent(UpperCAmelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
SCREAMING_SNAKE_CASE : List[str] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Tuple = src_parent
SCREAMING_SNAKE_CASE : Optional[int] = self.set_counts[src_parent]
SCREAMING_SNAKE_CASE : Optional[Any] = max(self.max_set , UpperCAmelCase_ )
return True
def _A ( self : Tuple , UpperCAmelCase_ : int ):
if self.parents[disj_set] == disj_set:
return disj_set
SCREAMING_SNAKE_CASE : Tuple = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 357 |
def lowerCamelCase__ ( lowercase , lowercase = 0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = length or len(lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = list_data[i + 1], list_data[i]
SCREAMING_SNAKE_CASE : str = True
return list_data if not swapped else bubble_sort(lowercase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
raise RuntimeError("CUDA out of memory." )
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
super().__init__()
lowerCamelCase_ = nn.Linear(3 , 4 )
lowerCamelCase_ = nn.BatchNormad(4 )
lowerCamelCase_ = nn.Linear(4 , 5 )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(UpperCamelCase ) ) )
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase ):
nonlocal batch_sizes
batch_sizes.append(UpperCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(UpperCamelCase , [128, 64, 32, 16, 8] )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase , UpperCamelCase ):
nonlocal batch_sizes
batch_sizes.append(UpperCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
lowerCamelCase_ ,lowerCamelCase_ = mock_training_loop_function("hello" )
self.assertListEqual(UpperCamelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, "hello"] )
def snake_case ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCamelCase ):
pass
with self.assertRaises(UpperCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def snake_case ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCamelCase ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(UpperCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def snake_case ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(UpperCamelCase ) as cm:
mock_training_loop_function(128 , "hello" , "world" )
self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )
def snake_case ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCamelCase ):
raise ValueError("Oops, we had an error!" )
with self.assertRaises(UpperCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!" , cm.exception.args[0] )
@require_cuda
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = torch.cuda.memory_allocated()
lowerCamelCase_ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , UpperCamelCase )
lowerCamelCase_ = release_memory(UpperCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , UpperCamelCase )
| 55 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : List[str] , _snake_case : Union[str, Any] , _snake_case : List[str]=2 , _snake_case : Any=True , _snake_case : Any=False , _snake_case : List[str]=10 , _snake_case : Any=3 , _snake_case : Union[str, Any]=32 * 4 , _snake_case : List[Any]=32 * 6 , _snake_case : Tuple=4 , _snake_case : Dict=32 , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_auxiliary_loss
UpperCAmelCase_ = num_queries
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_size
UpperCAmelCase_ = max_size
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = mask_feature_size
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
_snake_case)
UpperCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_snake_case)
UpperCAmelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_snake_case) > 0.5
).float()
UpperCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_snake_case) > 0.5).long()
UpperCAmelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCamelCase ( self : Any):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowerCamelCase ( self : str , _snake_case : List[Any] , _snake_case : List[str]):
"""simple docstring"""
UpperCAmelCase_ = output.encoder_hidden_states
UpperCAmelCase_ = output.pixel_decoder_hidden_states
UpperCAmelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_snake_case) , len(config.backbone_config.depths))
self.parent.assertTrue(len(_snake_case) , len(config.backbone_config.depths))
self.parent.assertTrue(len(_snake_case) , config.decoder_config.decoder_layers)
def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[str] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : str=False):
"""simple docstring"""
with torch.no_grad():
UpperCAmelCase_ = MaskFormerModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(pixel_values=_snake_case , pixel_mask=_snake_case)
UpperCAmelCase_ = model(_snake_case , output_hidden_states=_snake_case)
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(_snake_case , _snake_case)
def lowerCamelCase ( self : List[Any] , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = MaskFormerForInstanceSegmentation(config=_snake_case)
model.to(_snake_case)
model.eval()
def comm_check_on_output(_snake_case : Tuple):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
UpperCAmelCase_ = model(pixel_values=_snake_case , pixel_mask=_snake_case)
UpperCAmelCase_ = model(_snake_case)
comm_check_on_output(_snake_case)
UpperCAmelCase_ = model(
pixel_values=_snake_case , pixel_mask=_snake_case , mask_labels=_snake_case , class_labels=_snake_case)
comm_check_on_output(_snake_case)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class __snake_case ( a , a , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Union[str, Any] = False
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaskFormerModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_snake_case , **_snake_case , output_hidden_states=_snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_snake_case)
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''')
def lowerCamelCase ( self : Dict):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''')
def lowerCamelCase ( self : int):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''')
def lowerCamelCase ( self : str):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''')
def lowerCamelCase ( self : int):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def lowerCamelCase ( self : Any):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def lowerCamelCase ( self : str):
"""simple docstring"""
pass
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case)
@slow
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCAmelCase_ = MaskFormerModel.from_pretrained(_snake_case)
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = (self.model_tester.min_size,) * 2
UpperCAmelCase_ = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_snake_case),
'''mask_labels''': torch.randn((2, 10, *size) , device=_snake_case),
'''class_labels''': torch.zeros(2 , 10 , device=_snake_case).long(),
}
UpperCAmelCase_ = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(_snake_case)
UpperCAmelCase_ = model(**_snake_case)
self.assertTrue(outputs.loss is not None)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_snake_case , **_snake_case , output_hidden_states=_snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case).to(_snake_case)
UpperCAmelCase_ = model(**_snake_case , output_attentions=_snake_case)
self.assertTrue(outputs.attentions is not None)
def lowerCamelCase ( self : int):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.train()
UpperCAmelCase_ = model(_snake_case , mask_labels=_snake_case , class_labels=_snake_case).loss
loss.backward()
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.train()
UpperCAmelCase_ = model(_snake_case , mask_labels=_snake_case , class_labels=_snake_case)
UpperCAmelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
UpperCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_snake_case)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
snake_case_ : Dict = 1e-4
def A () -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''')
if is_vision_available()
else None
)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''').to(_snake_case)
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
UpperCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(_snake_case , (1, 3, 800, 1088))
with torch.no_grad():
UpperCAmelCase_ = model(**_snake_case)
UpperCAmelCase_ = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]]).to(_snake_case)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case))
UpperCAmelCase_ = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]]).to(_snake_case)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case))
UpperCAmelCase_ = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]]).to(_snake_case)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _snake_case , atol=_snake_case))
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
.to(_snake_case)
.eval()
)
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
UpperCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(_snake_case , (1, 3, 800, 1088))
with torch.no_grad():
UpperCAmelCase_ = model(**_snake_case)
# masks_queries_logits
UpperCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase_ = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
UpperCAmelCase_ = torch.tensor(_snake_case).to(_snake_case)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case))
# class_queries_logits
UpperCAmelCase_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
UpperCAmelCase_ = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
]).to(_snake_case)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case))
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''')
.to(_snake_case)
.eval()
)
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
UpperCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(_snake_case , (1, 3, 800, 1088))
with torch.no_grad():
UpperCAmelCase_ = model(**_snake_case)
# masks_queries_logits
UpperCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase_ = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
UpperCAmelCase_ = torch.tensor(_snake_case).to(_snake_case)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case))
# class_queries_logits
UpperCAmelCase_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
UpperCAmelCase_ = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]]).to(_snake_case)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
.to(_snake_case)
.eval()
)
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='''pt''' , )
UpperCAmelCase_ = inputs['''pixel_values'''].to(_snake_case)
UpperCAmelCase_ = [el.to(_snake_case) for el in inputs['''mask_labels''']]
UpperCAmelCase_ = [el.to(_snake_case) for el in inputs['''class_labels''']]
with torch.no_grad():
UpperCAmelCase_ = model(**_snake_case)
self.assertTrue(outputs.loss is not None)
| 51 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.txt"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
SCREAMING_SNAKE_CASE__ = {
"facebook/esm2_t6_8M_UR50D": 1_024,
"facebook/esm2_t12_35M_UR50D": 1_024,
}
def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
with open(_UpperCamelCase , 'r' ) as f:
snake_case = f.read().splitlines()
return [l.strip() for l in lines]
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Dict = VOCAB_FILES_NAMES
_lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCAmelCase , lowerCAmelCase="<unk>" , lowerCAmelCase="<cls>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<mask>" , lowerCAmelCase="<eos>" , **lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
snake_case = load_vocab_file(lowerCAmelCase )
snake_case = dict(enumerate(self.all_tokens ) )
snake_case = {tok: ind for ind, tok in enumerate(self.all_tokens )}
snake_case = unk_token
snake_case = cls_token
snake_case = pad_token
snake_case = mask_token
snake_case = eos_token
snake_case = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token ) )
def snake_case ( self , lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
return text.split()
def snake_case ( self , lowerCAmelCase=False ):
"""simple docstring"""
return len(self._id_to_token )
def snake_case ( self ):
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens )}
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token ) )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None ):
"""simple docstring"""
snake_case = [self.cls_token_id]
snake_case = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
snake_case = [1] + ([0] * len(lowerCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase ) + [1]
return mask
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
with open(lowerCAmelCase , 'w' ) as f:
f.write('\n'.join(self.all_tokens ) )
return (vocab_file,)
@property
def snake_case ( self ):
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = False ):
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase )
| 149 | """simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ = random.Random()
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any]=1.0 , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Optional[int]=None ) -> Optional[int]:
"""simple docstring"""
if rng is None:
snake_case = global_rng
snake_case = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=4_00 , lowerCAmelCase=20_00 , lowerCAmelCase=20_48 , lowerCAmelCase=1_28 , lowerCAmelCase=1 , lowerCAmelCase=5_12 , lowerCAmelCase=30 , lowerCAmelCase=4_41_00 , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = min_seq_length
snake_case = max_seq_length
snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case = spectrogram_length
snake_case = feature_size
snake_case = num_audio_channels
snake_case = hop_length
snake_case = chunk_length
snake_case = sampling_rate
def snake_case ( self ):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def snake_case ( self , lowerCAmelCase=False , lowerCAmelCase=False ):
"""simple docstring"""
def _flatten(lowerCAmelCase ):
return list(itertools.chain(*lowerCAmelCase ) )
if equal_length:
snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case = [np.asarray(lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCAmelCase_ ( lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = TvltFeatureExtractor
def snake_case ( self ):
"""simple docstring"""
snake_case = TvltFeatureExtractionTester(self )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCAmelCase , 'spectrogram_length' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'num_audio_channels' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'hop_length' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'chunk_length' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate' ) )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = feat_extract_first.save_pretrained(lowerCAmelCase )[0]
check_json_file_has_correct_format(lowerCAmelCase )
snake_case = self.feature_extraction_class.from_pretrained(lowerCAmelCase )
snake_case = feat_extract_first.to_dict()
snake_case = feat_extract_second.to_dict()
snake_case = dict_first.pop('mel_filters' )
snake_case = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = os.path.join(lowerCAmelCase , 'feat_extract.json' )
feat_extract_first.to_json_file(lowerCAmelCase )
snake_case = self.feature_extraction_class.from_json_file(lowerCAmelCase )
snake_case = feat_extract_first.to_dict()
snake_case = feat_extract_second.to_dict()
snake_case = dict_first.pop('mel_filters' )
snake_case = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
snake_case = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case = [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
snake_case = feature_extractor(lowerCAmelCase , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
snake_case = feature_extractor(
lowerCAmelCase , return_tensors='np' , sampling_rate=4_41_00 , mask_audio=lowerCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
snake_case = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
snake_case = np.asarray(lowerCAmelCase )
snake_case = feature_extractor(lowerCAmelCase , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
snake_case = ds.sort('id' ).select(range(lowerCAmelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def snake_case ( self ):
"""simple docstring"""
snake_case = self._load_datasamples(1 )
snake_case = TvltFeatureExtractor()
snake_case = feature_extractor(lowerCAmelCase , return_tensors='pt' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) )
snake_case = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCAmelCase , atol=1E-4 ) )
| 149 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : List[Any] = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class UpperCAmelCase_ (_lowercase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = "roberta"
def __init__( self , SCREAMING_SNAKE_CASE_=5_02_65 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Any:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
__lowerCamelCase : str = vocab_size
__lowerCamelCase : Optional[Any] = hidden_size
__lowerCamelCase : str = num_hidden_layers
__lowerCamelCase : Any = num_attention_heads
__lowerCamelCase : Dict = hidden_act
__lowerCamelCase : Any = intermediate_size
__lowerCamelCase : str = hidden_dropout_prob
__lowerCamelCase : Any = attention_probs_dropout_prob
__lowerCamelCase : int = max_position_embeddings
__lowerCamelCase : Optional[int] = type_vocab_size
__lowerCamelCase : Any = initializer_range
__lowerCamelCase : str = layer_norm_eps
__lowerCamelCase : Tuple = position_embedding_type
__lowerCamelCase : int = use_cache
__lowerCamelCase : List[Any] = classifier_dropout
class UpperCAmelCase_ (_lowercase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__lowerCamelCase : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__lowerCamelCase : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 185 | def _UpperCamelCase ( snake_case__ ) -> bool:
if not isinstance(snake_case__, snake_case__ ):
raise ValueError("check_bouncy() accepts only integer arguments" )
__UpperCAmelCase : Optional[int] = str(snake_case__ )
__UpperCAmelCase : Any = "".join(sorted(snake_case__ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _UpperCamelCase ( snake_case__ = 99 ) -> int:
if not 0 < percent < 100:
raise ValueError("solution() only accepts values from 0 to 100" )
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : int = 1
while True:
if check_bouncy(snake_case__ ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(99)}')
| 157 | 0 |
from collections import Counter
from timeit import timeit
def A__ ( SCREAMING_SNAKE_CASE__ = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""").lower()).values()) < 2
def A__ ( SCREAMING_SNAKE_CASE__ = "") -> bool:
if len(SCREAMING_SNAKE_CASE__) == 0:
return True
__snake_case: Optional[Any] = input_str.replace(""" """ , """""").lower()
# character_freq_dict: Stores the frequency of every character in the input string
__snake_case: dict[str, int] = {}
for character in lower_case_input_str:
__snake_case: int = character_freq_dict.get(SCREAMING_SNAKE_CASE__ , 0) + 1
__snake_case: List[str] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A__ ( SCREAMING_SNAKE_CASE__ = "") -> None:
print("""\nFor string = """ , SCREAMING_SNAKE_CASE__ , """:""")
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(SCREAMING_SNAKE_CASE__) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(SCREAMING_SNAKE_CASE__) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
__UpperCAmelCase : int = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
__UpperCAmelCase : int = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 371 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 293 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
snake_case__ : Dict = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
snake_case__ : str = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
snake_case__ : Tuple = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
snake_case__ : str = sorted(arg_to_scheduler.keys())
snake_case__ : Union[str, Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class A_ ( pl.LightningModule ):
def __init__(self :Optional[int] , _UpperCamelCase :argparse.Namespace , _UpperCamelCase :Optional[Any]=None , _UpperCamelCase :Dict="base" , _UpperCamelCase :int=None , _UpperCamelCase :Optional[int]=None , _UpperCamelCase :Any=None , **_UpperCamelCase :Any , )-> Dict:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_UpperCamelCase )
__A = 0
__A = Path(self.hparams.output_dir )
__A = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__A = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=_UpperCamelCase , **_UpperCamelCase , )
else:
__A = config
__A = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , _UpperCamelCase , _UpperCamelCase ):
assert hasattr(self.config , _UpperCamelCase ), f"""model config doesn't have a `{p}` attribute"""
setattr(self.config , _UpperCamelCase , getattr(self.hparams , _UpperCamelCase ) )
if tokenizer is None:
__A = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_UpperCamelCase , )
else:
__A = tokenizer
__A = MODEL_MODES[mode]
if model is None:
__A = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_UpperCamelCase , )
else:
__A = model
def _lowerCAmelCase (self :int , *_UpperCamelCase :List[str] , **_UpperCamelCase :str )-> Optional[Any]:
__A = self.model_type.from_pretrained(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCAmelCase (self :str )-> Tuple:
__A = arg_to_scheduler[self.hparams.lr_scheduler]
__A = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__A = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def _lowerCAmelCase (self :int )-> List[Any]:
__A = self.model
__A = ['''bias''', '''LayerNorm.weight''']
__A = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
__A = Adafactor(
_UpperCamelCase , lr=self.hparams.learning_rate , scale_parameter=_UpperCamelCase , relative_step=_UpperCamelCase )
else:
__A = AdamW(
_UpperCamelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__A = optimizer
__A = self.get_lr_scheduler()
return [optimizer], [scheduler]
def _lowerCAmelCase (self :int , _UpperCamelCase :List[Any] , _UpperCamelCase :List[str] )-> Optional[Any]:
return self.validation_step(_UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase (self :str , _UpperCamelCase :Optional[int] )-> Any:
return self.validation_end(_UpperCamelCase )
def _lowerCAmelCase (self :Tuple )-> int:
__A = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__A = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :str )-> List[Any]:
if stage == "test":
__A = len(self.test_dataloader().dataset )
else:
__A = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=_UpperCamelCase )
__A = len(self.train_dataloader().dataset )
def _lowerCAmelCase (self :Any , _UpperCamelCase :str , _UpperCamelCase :int , _UpperCamelCase :bool = False )-> str:
raise NotImplementedError('''You must implement this for your task''' )
def _lowerCAmelCase (self :Any )-> List[Any]:
return self.train_loader
def _lowerCAmelCase (self :Tuple )-> List[Any]:
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=_UpperCamelCase )
def _lowerCAmelCase (self :Dict )-> Union[str, Any]:
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=_UpperCamelCase )
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :Optional[int] )-> List[Any]:
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
_UpperCamelCase , list(filter(_UpperCamelCase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :Dict[str, Any] )-> None:
__A = self.output_dir.joinpath('''best_tfmr''' )
__A = self.step_count
self.model.save_pretrained(_UpperCamelCase )
self.tokenizer.save_pretrained(_UpperCamelCase )
@staticmethod
def _lowerCAmelCase (_UpperCamelCase :Optional[Any] , _UpperCamelCase :Any )-> Any:
parser.add_argument(
'''--model_name_or_path''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=_UpperCamelCase , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(_UpperCamelCase ).parent / '''test_run''' / '''cache''' ) , type=_UpperCamelCase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=_UpperCamelCase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=_UpperCamelCase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=_UpperCamelCase , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=_UpperCamelCase , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5e-5 , type=_UpperCamelCase , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=_UpperCamelCase , metavar=_UpperCamelCase , type=_UpperCamelCase , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=_UpperCamelCase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=_UpperCamelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=_UpperCamelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=_UpperCamelCase , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=_UpperCamelCase )
parser.add_argument('''--train_batch_size''' , default=32 , type=_UpperCamelCase )
parser.add_argument('''--eval_batch_size''' , default=32 , type=_UpperCamelCase )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class A_ ( pl.Callback ):
def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :Tuple , _UpperCamelCase :Union[str, Any] )-> Tuple:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class A_ ( pl.Callback ):
def _lowerCAmelCase (self :str , _UpperCamelCase :List[Any] , _UpperCamelCase :List[str] )-> Union[str, Any]:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_UpperCamelCase )
class A_ ( pl.Callback ):
def _lowerCAmelCase (self :int , _UpperCamelCase :Tuple , _UpperCamelCase :List[Any] )-> List[Any]:
__A = trainer.lr_schedulers[0]['''scheduler''']
__A = {f"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_UpperCamelCase )
def _lowerCAmelCase (self :Dict , _UpperCamelCase :pl.Trainer , _UpperCamelCase :pl.LightningModule )-> Dict:
rank_zero_info('''***** Validation results *****''' )
__A = trainer.callback_metrics
# Log results
for key in sorted(_UpperCamelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(_UpperCamelCase , str(metrics[key] ) ) )
def _lowerCAmelCase (self :Tuple , _UpperCamelCase :pl.Trainer , _UpperCamelCase :pl.LightningModule )-> Optional[Any]:
rank_zero_info('''***** Test results *****''' )
__A = trainer.callback_metrics
# Log and save results to file
__A = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(_UpperCamelCase , '''w''' ) as writer:
for key in sorted(_UpperCamelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(_UpperCamelCase , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(_UpperCamelCase , str(metrics[key] ) ) )
def _a ( lowerCamelCase: Any , lowerCamelCase: List[Any] ) -> None:
'''simple docstring'''
parser.add_argument(
'''--output_dir''' , default=str(Path(lowerCamelCase ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=lowerCamelCase , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCamelCase )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCamelCase , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCamelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=lowerCamelCase , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(lowerCamelCase ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCamelCase , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass a logger object such as WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
| 117 |
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge vertex_count - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
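A quick sanity check on a tiny hand-built graph (the edge list below is made up for illustration): relaxation finds that the two-edge path 0 -> 1 -> 2 of cost 5 beats the direct 0 -> 2 edge of cost 10.

example_graph = [
    {"src": 0, "dst": 1, "weight": 2},
    {"src": 1, "dst": 2, "weight": 3},
    {"src": 0, "dst": 2, "weight": 10},
]
assert bellman_ford(example_graph, 3, 3, 0) == [0.0, 2.0, 5.0]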
| 117 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
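A short usage sketch; the pushed values are arbitrary:

stack = LinkedStack[int]()
stack.push(5)
stack.push(9)
assert stack.peek() == 9  # the last pushed value sits on top
assert stack.pop() == 9
assert str(stack) == "5" and len(stack) == 1
assert not stack.is_empty()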
| 55 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """Weighted undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm and return a minimum spanning subgraph."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            # Any value strictly larger than every edge weight works as a sentinel.
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
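For intuition, a three-vertex triangle (weights made up for illustration) keeps the two cheapest edges and drops the heaviest one:

triangle = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 4})
mst = triangle.prims_algorithm()
assert sum(mst.edges.values()) == 3  # edges (0, 1) and (1, 2) are kept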
| 222 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        # Move the pivot value to the end of the slice.
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
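Seeding the RNG gives a reproducible run on a small list (the sample values are arbitrary); the returned count tallies the element-versus-pivot comparisons made inside _in_place_partition:

from random import seed

seed(0)
sample = [3, 1, 2]
comparisons = _in_place_quick_sort(sample, 0, len(sample) - 1)
assert sample == [1, 2, 3]
print("comparisons:", comparisons)  # the exact count depends on the pivots drawn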
| 222 | 1 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 247 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
        FlaxBertForMaskedLM,
        FlaxBertForMultipleChoice,
        FlaxBertForNextSentencePrediction,
        FlaxBertForPreTraining,
        FlaxBertForQuestionAnswering,
        FlaxBertForSequenceClassification,
        FlaxBertForTokenClassification,
        FlaxBertModel,
    )
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 247 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with SentencePiece, splitting off trailing commas after digits."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
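A minimal usage sketch against the checkpoint referenced above (network access and the jieba package are required; the sample sentence is arbitrary):

tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer.encode("今天天气真好")  # any short Chinese sentence works here
print(tokenizer.convert_ids_to_tokens(ids))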
| 2 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters, sorted; anagrams share a signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]


data: str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
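Assuming words.txt holds a standard English word list, a lookup behaves like this (the exact output depends on the word file):

print(anagram("pots"))  # e.g. ['opts', 'post', 'pots', 'spot', 'stop', 'tops']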
| 94 | 0 |
from __future__ import annotations
def ceil_index(v, left, right, key):
    """Smallest index in v[left+1 .. right] whose value is >= key (binary search)."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # A new smallest value starts a fresh candidate subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail, keeping tail values minimal.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
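A worked check: in [2, 5, 3, 7, 11, 8, 10, 13, 6] the longest strictly increasing subsequence is 2, 3, 7, 8, 10, 13, of length 6.

assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6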
| 211 |
def is_power_of_two(number: int) -> bool:
    """A power of two has exactly one bit set, so n & (n - 1) clears it to zero."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
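A few illustrative checks; note that 0 also satisfies 0 & -1 == 0, a standard caveat of this bit trick.

assert is_power_of_two(8)
assert not is_power_of_two(6)
assert is_power_of_two(0)  # caveat of the n & (n - 1) trick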
| 211 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 208 |
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series 1, 1/2, ..., 1/n as a list of strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
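For example, the first five terms come back as strings:

assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]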
| 208 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_UpperCAmelCase : List[str] = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_UpperCAmelCase : str = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_UpperCAmelCase : Tuple = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union between a prediction and a ground truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersections and unions over a batch of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: int,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 9 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then close in with two pointers for each fixed first element."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 9 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


# We will verify the converted model on this COCO image.
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original GLPN weights to our GLPN structure."""
    # define GLPN configuration (Segformer-B4 size)
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you\'re pushing to the hub.""",
)
__snake_case = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
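A typical invocation, assuming the original .pth checkpoint was downloaded locally (the paths below are illustrative only):

# python convert_glpn_to_pytorch.py \
#     --checkpoint_path ./checkpoints/glpn_kitti.pth \
#     --pytorch_dump_folder_path ./glpn-kitti-hf \
#     --model_name glpn-kitti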
| 259 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__magic_name__ = "src/transformers"
__magic_name__ = "docs/source/en"
__magic_name__ = "."
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
with open(UpperCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
# Find the start prompt.
__SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(UpperCamelCase_ ):
start_index += 1
start_index += 1
__SCREAMING_SNAKE_CASE = start_index
while not lines[end_index].startswith(UpperCamelCase_ ):
end_index += 1
end_index -= 1
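    # trim leading and trailing blank lines from the extracted block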
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__magic_name__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
__magic_name__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__magic_name__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__magic_name__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
__magic_name__ = direct_transformers_import(TRANSFORMERS_PATH)
def _lowerCAmelCase ( UpperCamelCase_ ):
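    # split a CamelCase identifier into word pieces, e.g. "TFResNetModel" -> ["TF", "Res", "Net", "Model"]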
__SCREAMING_SNAKE_CASE = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCamelCase_ )
return [m.group(0 ) for m in matches]
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
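    # center `text` in a field of `width` characters; ✅ and ❌ render two columns wide, hence the hard-coded length of 2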
__SCREAMING_SNAKE_CASE = 2 if text == """✅""" or text == """❌""" else len(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = (width - text_length) // 2
__SCREAMING_SNAKE_CASE = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__SCREAMING_SNAKE_CASE = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
__SCREAMING_SNAKE_CASE = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
__SCREAMING_SNAKE_CASE = collections.defaultdict(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = collections.defaultdict(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = collections.defaultdict(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = collections.defaultdict(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = collections.defaultdict(UpperCamelCase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = None
if attr_name.endswith("""Tokenizer""" ):
__SCREAMING_SNAKE_CASE = slow_tokenizers
__SCREAMING_SNAKE_CASE = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
__SCREAMING_SNAKE_CASE = fast_tokenizers
__SCREAMING_SNAKE_CASE = attr_name[:-13]
elif _re_tf_models.match(UpperCamelCase_ ) is not None:
__SCREAMING_SNAKE_CASE = tf_models
__SCREAMING_SNAKE_CASE = _re_tf_models.match(UpperCamelCase_ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase_ ) is not None:
__SCREAMING_SNAKE_CASE = flax_models
__SCREAMING_SNAKE_CASE = _re_flax_models.match(UpperCamelCase_ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase_ ) is not None:
__SCREAMING_SNAKE_CASE = pt_models
__SCREAMING_SNAKE_CASE = _re_pt_models.match(UpperCamelCase_ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase_ ) > 0:
if attr_name in model_name_to_prefix.values():
__SCREAMING_SNAKE_CASE = True
break
# Try again after removing the last word in the name
__SCREAMING_SNAKE_CASE = """""".join(camel_case_split(UpperCamelCase_ )[:-1] )
# Let's build that table!
__SCREAMING_SNAKE_CASE = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
__SCREAMING_SNAKE_CASE = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
__SCREAMING_SNAKE_CASE = [len(UpperCamelCase_ ) + 2 for c in columns]
__SCREAMING_SNAKE_CASE = max([len(UpperCamelCase_ ) for name in model_names] ) + 2
# Build the table per se
__SCREAMING_SNAKE_CASE = """|""" + """|""".join([_center_text(UpperCamelCase_ , UpperCamelCase_ ) for c, w in zip(UpperCamelCase_ , UpperCamelCase_ )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
__SCREAMING_SNAKE_CASE = {True: """✅""", False: """❌"""}
for name in model_names:
__SCREAMING_SNAKE_CASE = model_name_to_prefix[name]
__SCREAMING_SNAKE_CASE = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(UpperCamelCase_ , UpperCamelCase_ ) for l, w in zip(UpperCamelCase_ , UpperCamelCase_ )] ) + "|\n"
return table
def _lowerCAmelCase ( UpperCamelCase_=False ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = _find_text_in_file(
filename=os.path.join(UpperCamelCase_ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
__SCREAMING_SNAKE_CASE = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(UpperCamelCase_ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__magic_name__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 255 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__magic_name__ = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = {}
state_dict.pop("""pixel_mean""" , UpperCamelCase_ )
state_dict.pop("""pixel_std""" , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
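    # captures "...output_hypernetworks_mlps.<mlp idx>.layers.<layer idx>..."; the layer index selects the rename below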
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__SCREAMING_SNAKE_CASE = key.replace(UpperCamelCase_ , UpperCamelCase_ )
if re.match(UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = int(re.match(UpperCamelCase_ , UpperCamelCase_ ).group(2 ) )
if layer_nb == 0:
__SCREAMING_SNAKE_CASE = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
__SCREAMING_SNAKE_CASE = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
__SCREAMING_SNAKE_CASE = key.replace("""layers.2""" , """proj_out""" )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="ybelkada/segment-anything" ):
__SCREAMING_SNAKE_CASE = hf_hub_download(UpperCamelCase_ , f"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
__SCREAMING_SNAKE_CASE = SamConfig()
elif "sam_vit_l" in model_name:
__SCREAMING_SNAKE_CASE = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__SCREAMING_SNAKE_CASE = SamConfig(
vision_config=UpperCamelCase_ , )
elif "sam_vit_h" in model_name:
__SCREAMING_SNAKE_CASE = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__SCREAMING_SNAKE_CASE = SamConfig(
vision_config=UpperCamelCase_ , )
__SCREAMING_SNAKE_CASE = torch.load(UpperCamelCase_ , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = replace_keys(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = SamImageProcessor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = SamModel(UpperCamelCase_ )
hf_model.load_state_dict(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = hf_model.to("""cuda""" )
__SCREAMING_SNAKE_CASE = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("""RGB""" )
__SCREAMING_SNAKE_CASE = [[[400, 650]]]
__SCREAMING_SNAKE_CASE = [[1]]
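    # a single positive (x, y) point prompt for one image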
__SCREAMING_SNAKE_CASE = processor(images=np.array(UpperCamelCase_ ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
__SCREAMING_SNAKE_CASE = processor(
images=np.array(UpperCamelCase_ ) , input_points=UpperCamelCase_ , input_labels=UpperCamelCase_ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
__SCREAMING_SNAKE_CASE = ((75, 275, 1725, 850),)
__SCREAMING_SNAKE_CASE = processor(images=np.array(UpperCamelCase_ ) , input_boxes=UpperCamelCase_ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
__SCREAMING_SNAKE_CASE = [[[400, 650], [800, 650]]]
__SCREAMING_SNAKE_CASE = [[1, 1]]
__SCREAMING_SNAKE_CASE = processor(
images=np.array(UpperCamelCase_ ) , input_points=UpperCamelCase_ , input_labels=UpperCamelCase_ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
__magic_name__ = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__magic_name__ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 255 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
def __init__( self : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : int=32 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Optional[Any]=10 , UpperCAmelCase_ : List[Any]=[10, 20, 30, 40] , UpperCAmelCase_ : Union[str, Any]=[1, 1, 2, 1] , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Dict="relu" , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[Any]=None , ):
lowerCAmelCase : str = parent
lowerCAmelCase : Any = batch_size
lowerCAmelCase : List[Any] = image_size
lowerCAmelCase : Any = num_channels
lowerCAmelCase : Optional[Any] = embeddings_size
lowerCAmelCase : Optional[Any] = hidden_sizes
lowerCAmelCase : List[Any] = depths
lowerCAmelCase : Dict = is_training
lowerCAmelCase : Any = use_labels
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : Tuple = num_labels
lowerCAmelCase : str = scope
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : str = None
if self.use_labels:
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Optional[int] ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase__ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ):
lowerCAmelCase : List[str] = TFResNetModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[str] = self.num_labels
lowerCAmelCase : Dict = TFResNetForImageClassification(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = config_and_inputs
lowerCAmelCase : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
lowerCAmelCase_ : str = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase_ : Dict = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Optional[Any] = False
lowerCAmelCase_ : Optional[Any] = False
lowerCAmelCase_ : Any = False
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = TFResNetModelTester(self )
lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : int ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Tuple ):
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def lowercase__ ( self : Optional[Any] ):
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def lowercase__ ( self : Tuple ):
pass
def lowercase__ ( self : List[Any] ):
lowerCAmelCase , lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
lowerCAmelCase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : str ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : Any ):
def check_hidden_states_output(UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : List[str] = self.model_tester.num_stages
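            # hidden_states holds the embedding output plus one tensor per stage, hence the +1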
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Tuple = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase : Tuple = layer_type
lowerCAmelCase : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : int = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def lowercase__ ( self : Optional[Any] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : str = TFResNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Any ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase : List[str] = self.default_image_processor
lowerCAmelCase : str = prepare_img()
lowerCAmelCase : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='tf' )
# forward pass
lowerCAmelCase : int = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCAmelCase : str = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : List[str] = tf.constant([-11.1069, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 138 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : List[str] = ["model.decoder.embed_positions.weights"]
def A_ ( snake_case : Any ) -> List[Any]:
'''simple docstring'''
if "emb" in name:
__UpperCamelCase = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
__UpperCamelCase = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
__UpperCamelCase = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
__UpperCamelCase = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
__UpperCamelCase = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
__UpperCamelCase = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
__UpperCamelCase = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
__UpperCamelCase = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
__UpperCamelCase = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
__UpperCamelCase = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
__UpperCamelCase = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def A_ ( snake_case : OrderedDict , snake_case : int ) -> Tuple[Dict, Dict]:
'''simple docstring'''
__UpperCamelCase = list(state_dict.keys() )
__UpperCamelCase = {}
for key in keys:
__UpperCamelCase = state_dict.pop(snake_case )
__UpperCamelCase = rename_keys(snake_case )
if "in_proj_weight" in key:
# split fused qkv proj
__UpperCamelCase = val[:hidden_size, :]
__UpperCamelCase = val[hidden_size : 2 * hidden_size, :]
__UpperCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__UpperCamelCase = val
else:
__UpperCamelCase = val
return state_dict, enc_dec_proj_state_dict
def A_ ( snake_case : str ) -> MusicgenDecoderConfig:
'''simple docstring'''
if checkpoint == "small":
# default config values
__UpperCamelCase = 1024
__UpperCamelCase = 24
__UpperCamelCase = 16
elif checkpoint == "medium":
__UpperCamelCase = 1536
__UpperCamelCase = 48
__UpperCamelCase = 24
elif checkpoint == "large":
__UpperCamelCase = 2048
__UpperCamelCase = 48
__UpperCamelCase = 32
else:
raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
__UpperCamelCase = MusicgenDecoderConfig(
hidden_size=snake_case , ffn_dim=hidden_size * 4 , num_hidden_layers=snake_case , num_attention_heads=snake_case , )
return config
@torch.no_grad()
def A_ ( snake_case : Any , snake_case : str=None , snake_case : Any=None , snake_case : Union[str, Any]="cpu" ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase = MusicGen.get_pretrained(snake_case , device=snake_case )
__UpperCamelCase = decoder_config_from_checkpoint(snake_case )
__UpperCamelCase = fairseq_model.lm.state_dict()
__UpperCamelCase , __UpperCamelCase = rename_state_dict(
snake_case , hidden_size=decoder_config.hidden_size )
__UpperCamelCase = TaEncoderModel.from_pretrained('''t5-base''' )
__UpperCamelCase = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
__UpperCamelCase = MusicgenForCausalLM(snake_case ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__UpperCamelCase , __UpperCamelCase = decoder.load_state_dict(snake_case , strict=snake_case )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(snake_case )
if len(snake_case ) > 0:
raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
if len(snake_case ) > 0:
raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
__UpperCamelCase = MusicgenForConditionalGeneration(text_encoder=snake_case , audio_encoder=snake_case , decoder=snake_case )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(snake_case )
# check we can do a forward pass
__UpperCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__UpperCamelCase = input_ids.reshape(2 * 4 , -1 )
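    # MusicGen decodes one stream per codebook (4 here), so ids are flattened to (batch * num_codebooks, seq_len)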
with torch.no_grad():
__UpperCamelCase = model(input_ids=snake_case , decoder_input_ids=snake_case ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
__UpperCamelCase = AutoTokenizer.from_pretrained('''t5-base''' )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
__UpperCamelCase = MusicgenProcessor(feature_extractor=snake_case , tokenizer=snake_case )
# set the appropriate bos/pad token ids
__UpperCamelCase = 2048
__UpperCamelCase = 2048
# set other default generation config params
__UpperCamelCase = int(30 * audio_encoder.config.frame_rate )
__UpperCamelCase = True
__UpperCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(snake_case ).mkdir(exist_ok=snake_case )
logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
if repo_id:
logger.info(f"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(snake_case )
processor.push_to_hub(snake_case )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowercase__ : Tuple = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 328 | 0 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = [0] * no_of_processes
_snake_case = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_SCREAMING_SNAKE_CASE ):
_snake_case = burst_time[i]
_snake_case = []
_snake_case = 0
_snake_case = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
_snake_case = []
_snake_case = -1
for i in range(_SCREAMING_SNAKE_CASE ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_snake_case = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
_snake_case = i
total_time += burst_time[target_process]
completed += 1
_snake_case = 0
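            # waiting time = completion time - arrival time - burst time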
_snake_case = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = [0] * no_of_processes
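    # turnaround time = burst time + waiting time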
for i in range(_SCREAMING_SNAKE_CASE ):
_snake_case = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
__lowerCAmelCase = 4
__lowerCAmelCase = [2, 5, 3, 7]
__lowerCAmelCase = [0, 0, 0, 0]
__lowerCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
    print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 270 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
__lowerCAmelCase = logging.getLogger(__name__)
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = "masked_bert"
def __init__(self , UpperCAmelCase=30522 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-1_2 , UpperCAmelCase=0 , UpperCAmelCase="topK" , UpperCAmelCase="constant" , UpperCAmelCase=0.0 , **UpperCAmelCase , ) -> int:
super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase )
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = pruning_method
_snake_case = mask_init
        _snake_case = mask_scale
| 270 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Dict = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
A_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase__ : Optional[str] = field(
default='./' ,metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' ,metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' ,metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size for training.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase__ : Optional[float] = field(default=0.1 ,metadata={'help': 'Value of weight decay.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_0_0_0 ,metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase__ : Optional[float] = field(default=2E-4 ,metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase__ : Optional[str] = field(default='cosine' ,metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase__ : Optional[int] = field(
default=7_5_0 ,metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_6 ,metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase__ : Optional[int] = field(default=5_0_0_0_0 ,metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0_2_4 ,metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Training seed.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_2_4 ,metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} ,)
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' ,metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0_2_4 ,metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=A__ ,metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} ,)
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase__ : Optional[float] = field(default=0.2 ,metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase__ : Optional[int] = field(default=2_5_6 ,metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase__ : Optional[int] = field(default=0 ,metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase__ : Optional[float] = field(default=0.9_5 ,metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0 ,metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase__ : Optional[int] = field(
default=2_0_0 ,metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Random seed used for evaluation.'} )
lowerCamelCase__ : Optional[str] = field(
        default='eval_results.json' ,metadata={'help': 'File in which the evaluation results are saved.'} )
lowerCamelCase__ : Optional[str] = field(
default='0' ,metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} ,)
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} ,)
lowerCamelCase__ : Optional[str] = field(
default='transformersbook/codeparrot' ,metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase__ : Optional[str] = field(
        default='codeparrot-clean' ,metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_0_0_0_0 ,metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase__ : Optional[str] = field(default='content' ,metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase__ : Optional[float] = field(
default=1_0_0_0 ,metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=1_0_0 ,metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.2_5 ,metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=1.5 ,metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.7 ,metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Name or path to the tokenizer.'} ,)
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.8_5 ,metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='gpt2' ,metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase__ : Optional[str] = field(
default='transformersbook/codeparrot-train' ,metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase__ : Optional[str] = field(default='content' ,metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase__ : Optional[int] = field(default=2_0_0_0_0_0 ,metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase__ : Optional[int] = field(
        default=3_2_7_6_8 ,metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase__ : Optional[str] = field(default='codeparrot' ,metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' ,metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase__ : Optional[str] = field(
default='tokenized-codeparrot-train' ,metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase__ : Optional[int] = field(default=A__ ,metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='gpt2-large' ,metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase__ : Optional[str] = field(default='codeparrot' ,metadata={'help': 'Name of the created model.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'Push saved tokenizer to the hub.'} )
| 165 | 1 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
UpperCamelCase__ : Union[str, Any] = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE )
# Let's go
UpperCamelCase__ : str = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , '''func''' ):
parser.print_help()
exit(1 )
# Run
UpperCamelCase__ : int = args.func(SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 51 |
def _a ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
UpperCamelCase__ : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
UpperCamelCase__ : int = years_to_repay * 12
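    # standard amortization formula: EMI = P * r * (1 + r)^n / ((1 + r)^n - 1)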
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51 | 1 |
'''simple docstring'''
class lowercase__ :
def __init__( self : Dict ,lowerCamelCase__ : int ):
'''simple docstring'''
_UpperCamelCase : Dict = n
_UpperCamelCase : Optional[int] = [None] * self.n
_UpperCamelCase : List[str] = 0 # index of the first element
_UpperCamelCase : Dict = 0
_UpperCamelCase : int = 0
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.size
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.size == 0
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
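        # peek at the front element without removing it; returns False when the queue is empty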
return False if self.is_empty() else self.array[self.front]
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
_UpperCamelCase : Optional[int] = data
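        # advance the rear index with modular wraparound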
_UpperCamelCase : Tuple = (self.rear + 1) % self.n
self.size += 1
return self
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
if self.size == 0:
raise Exception('UNDERFLOW' )
_UpperCamelCase : List[Any] = self.array[self.front]
_UpperCamelCase : Union[str, Any] = None
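        # advance the front index with modular wraparound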
_UpperCamelCase : Optional[int] = (self.front + 1) % self.n
self.size -= 1
return temp
| 83 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowerCAmelCase_ ( __a ) -> YolosConfig:
"""simple docstring"""
lowerCamelCase__: str =YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__: int =192
lowerCamelCase__: Optional[int] =768
lowerCamelCase__: Any =12
lowerCamelCase__: str =3
lowerCamelCase__: Optional[int] =[800, 1333]
lowerCamelCase__: Union[str, Any] =False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: int =330
lowerCamelCase__: Optional[Any] =14
lowerCamelCase__: Any =6
lowerCamelCase__: List[str] =1320
elif "yolos_s" in yolos_name:
lowerCamelCase__: List[str] =384
lowerCamelCase__: Union[str, Any] =1536
lowerCamelCase__: List[Any] =12
lowerCamelCase__: Any =6
elif "yolos_b" in yolos_name:
lowerCamelCase__: str =[800, 1344]
lowerCamelCase__: int =91
lowerCamelCase__: str ="huggingface/label-files"
lowerCamelCase__: List[str] ="coco-detection-id2label.json"
lowerCamelCase__: Tuple =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) )
lowerCamelCase__: Dict ={int(__a ): v for k, v in idalabel.items()}
lowerCamelCase__: List[str] =idalabel
lowerCamelCase__: int ={v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __a , __a , __a = False ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__: Optional[int] =state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowerCamelCase__: Dict =state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__: Union[str, Any] =in_proj_weight[: config.hidden_size, :]
lowerCamelCase__: str =in_proj_bias[: config.hidden_size]
lowerCamelCase__: str =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__: str =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__: Optional[int] =in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__: List[Any] =in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
if "backbone" in name:
lowerCamelCase__: Optional[Any] =name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase__: Optional[int] =name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase__: str =name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase__: Tuple =name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase__: Any =name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase__: List[Any] =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase__: Union[str, Any] =name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase__: Any =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase__: Optional[int] =name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase__: int =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase__: int =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase__: List[str] =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase__: Any =name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase__: Dict =name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase__: List[str] =name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase__: Any =name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCAmelCase_ ( __a , __a ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__: Any =orig_state_dict.pop(__a )
if "qkv" in key:
lowerCamelCase__: Tuple =key.split("." )
lowerCamelCase__: List[str] =int(key_split[2] )
lowerCamelCase__: Tuple =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
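            # the fused qkv weight is split into equal query, key and value chunks along dim 0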
if "weight" in key:
lowerCamelCase__: int =val[:dim, :]
lowerCamelCase__: str =val[
dim : dim * 2, :
]
lowerCamelCase__: Any =val[-dim:, :]
else:
lowerCamelCase__: Tuple =val[:dim]
lowerCamelCase__: Optional[Any] =val[dim : dim * 2]
lowerCamelCase__: str =val[-dim:]
else:
lowerCamelCase__: Dict =val
return orig_state_dict
def lowerCAmelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase__: Any ="http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase__: Optional[Any] =Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: int =get_yolos_config(__a )
# load original state_dict
lowerCamelCase__: Optional[int] =torch.load(__a , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase__: int =YolosForObjectDetection(__a )
model.eval()
lowerCamelCase__: Union[str, Any] =convert_state_dict(__a , __a )
model.load_state_dict(__a )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__: Any =800 if yolos_name != "yolos_ti" else 512
lowerCamelCase__: Tuple =YolosImageProcessor(format="coco_detection" , size=__a )
lowerCamelCase__: str =image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase__: Tuple =model(**__a )
lowerCamelCase__ , lowerCamelCase__: List[str] =outputs.logits, outputs.pred_boxes
lowerCamelCase__ , lowerCamelCase__: Any =None, None
if yolos_name == "yolos_ti":
lowerCamelCase__: Optional[Any] =torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCamelCase__: List[Any] =torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase__: Optional[int] =torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCamelCase__: Any =torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase__: str =torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCamelCase__: Optional[Any] =torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: str =torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCamelCase__: Union[str, Any] =torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCamelCase__: Tuple =torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
lowerCamelCase__: Optional[int] =torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __a , atol=1e-4 )
Path(__a ).mkdir(exist_ok=__a )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if push_to_hub:
lowerCamelCase__: Any ={
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase__: Optional[int] =model_mapping[yolos_name]
image_processor.push_to_hub(__a , organization="hustvl" )
model.push_to_hub(__a , organization="hustvl" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 10 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class A :
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = 42
class A :
"""simple docstring"""
def __init__( self : Any,lowercase_ : int )-> Any:
'''simple docstring'''
A__ = [[] for _ in range(lowercase_ )]
A__ = size
def __getitem__( self : List[str],lowercase_ : int )-> Iterator[Edge]:
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
return self._size
def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : int,lowercase_ : int )-> List[Any]:
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(lowercase_,lowercase_ ) )
def snake_case__ ( self : Tuple,lowercase_ : int,lowercase_ : int )-> int | None:
'''simple docstring'''
A__ = deque([start_vertex] )
A__ = [None] * self.size
A__ = 0
while queue:
A__ = queue.popleft()
A__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A__ = current_distance + edge.weight
A__ = distances[edge.destination_vertex]
if (
isinstance(lowercase_,lowercase_ )
and new_distance >= dest_vertex_distance
):
continue
A__ = new_distance
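                # 0-1 BFS: push zero-weight edges to the front of the deque and unit-weight edges to the back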
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str],lowercase_ : List[str],lowercase_ : bool = True,lowercase_ : Dict[str, int] = None,lowercase_ : int = 3_2,lowercase_ : bool = True,lowercase_ : Union[int, float] = 1 / 2_5_5,lowercase_ : bool = True,lowercase_ : bool = True,lowercase_ : Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073],lowercase_ : Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711],lowercase_ : bool = True,lowercase_ : Tuple=7,lowercase_ : str=3_0,lowercase_ : Union[str, Any]=4_0_0,lowercase_ : Dict=3,)-> List[Any]:
'''simple docstring'''
A__ = parent
A__ = do_resize
A__ = size if size is not None else {'shortest_edge': 2_8_8}
A__ = size_divisor
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = do_center_crop
A__ = image_mean
A__ = image_std
A__ = do_pad
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
def snake_case__ ( self : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def snake_case__ ( self : int,lowercase_ : Optional[int],lowercase_ : List[str]=False )-> Any:
'''simple docstring'''
if not batched:
A__ = self.size['shortest_edge']
A__ = image_inputs[0]
if isinstance(lowercase_,Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
A__ = size / min(lowercase_,lowercase_ )
if h < w:
A__ , A__ = size, scale * w
else:
A__ , A__ = scale * h, size
A__ = int((1_3_3_3 / 8_0_0) * size )
if max(lowercase_,lowercase_ ) > max_size:
A__ = max_size / max(lowercase_,lowercase_ )
A__ = newh * scale
A__ = neww * scale
A__ , A__ = int(newh + 0.5 ), int(neww + 0.5 )
A__ , A__ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(lowercase_,key=lambda lowercase_ : item[0] )[0]
A__ = max(lowercase_,key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
def snake_case__ ( self : str )-> Optional[int]:
'''simple docstring'''
A__ = BridgeTowerImageProcessingTester(self )
@property
def snake_case__ ( self : Dict )-> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_,'image_mean' ) )
self.assertTrue(hasattr(lowercase_,'image_std' ) )
self.assertTrue(hasattr(lowercase_,'do_normalize' ) )
self.assertTrue(hasattr(lowercase_,'do_resize' ) )
self.assertTrue(hasattr(lowercase_,'size' ) )
self.assertTrue(hasattr(lowercase_,'size_divisor' ) )
def snake_case__ ( self : Any )-> List[str]:
'''simple docstring'''
pass
def snake_case__ ( self : int )-> Any:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_,Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
A__ = image_processing(lowercase_,return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase_,batched=lowercase_ )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
def snake_case__ ( self : List[str] )-> Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=lowercase_,numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_,np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
A__ = image_processing(lowercase_,return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase_,batched=lowercase_ )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
def snake_case__ ( self : Optional[Any] )-> List[Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=lowercase_,torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_,torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
A__ = image_processing(lowercase_,return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase_,batched=lowercase_ )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
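The `get_expected_values` helper above re-derives the processor's resize rule: scale the short side to `shortest_edge`, cap the long side at (1333/800) times `shortest_edge`, then round both sides down to a multiple of `size_divisor`. A standalone restatement of that arithmetic (the function name is assumed):
def expected_resize(height, width, shortest_edge=288, size_divisor=32):
    scale = shortest_edge / min(height, width)
    if height < width:
        new_h, new_w = shortest_edge, scale * width
    else:
        new_h, new_w = scale * height, shortest_edge
    max_size = int((1333 / 800) * shortest_edge)  # long-side cap
    if max(new_h, new_w) > max_size:
        rescale = max_size / max(new_h, new_w)
        new_h, new_w = new_h * rescale, new_w * rescale
    new_h, new_w = int(new_h + 0.5), int(new_w + 0.5)  # round half up
    return new_h // size_divisor * size_divisor, new_w // size_divisor * size_divisor
For a 480x640 input this yields (288, 384).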
| 282 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__lowercase = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=UpperCAmelCase__, cache_dir=UpperCAmelCase__ )
__lowercase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase__, os.listdir(UpperCAmelCase__ )[0], "snapshots" ) )]
__lowercase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : List[Any] ):
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=UpperCAmelCase__ )
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 4
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1E-3
assert np.abs(np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 49_947.875 ) < 5E-1
__lowercase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCAmelCase__ ) == num_samples
def _lowercase ( self : int ):
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=UpperCAmelCase__ )
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5E-1
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__ )
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa )
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1
def _lowercase ( self : Optional[int] ):
__lowercase = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=UpperCAmelCase__, steps_offset=1, )
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, )
__lowercase = scheduler.create_state()
__lowercase = scheduler_state
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5E-1
def _lowercase ( self : Dict ):
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = jax.random.split(jax.random.PRNGKey(0 ), UpperCAmelCase__ )
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__, )
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__, use_memory_efficient_attention=UpperCAmelCase__, )
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
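Every test above follows the same data-parallel recipe: replicate the weights across devices, shard the per-device inputs and RNG keys, then call the pipeline with `jit=True` so it runs under `pmap`. A condensed sketch of that recipe; the prompt text and step count are assumptions:
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16)
num_devices = jax.device_count()
prompt_ids = pipeline.prepare_inputs(["a photo of an astronaut"] * num_devices)
params = replicate(params)  # copy the weights to every device
rng = jax.random.split(jax.random.PRNGKey(0), num_devices)
prompt_ids = shard(prompt_ids)  # reshape leading axis to (devices, batch_per_device, ...)
images = pipeline(prompt_ids, params, rng, num_inference_steps=50, jit=True).images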
| 17 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , lowerCamelCase , )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = RobertaConfig
snake_case_ = """roberta"""
def __init__( self : Any , __lowercase : Union[str, Any] ) -> Optional[int]:
super().__init__(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =RobertaEmbeddings(__lowercase )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """ , ROBERTA_START_DOCSTRING , )
class DeeRobertaForSequenceClassification( BertPreTrainedModel ):
    config_class = RobertaConfig
    base_model_prefix = """roberta"""
def __init__( self : Tuple , __lowercase : Dict ) -> Dict:
super().__init__(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =config.num_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] =config.num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] =DeeRobertaModel(__lowercase )
SCREAMING_SNAKE_CASE__ : int =nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE__ : Dict =nn.Linear(config.hidden_size , self.config.num_labels )
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING )
def __magic_name__ ( self : str , __lowercase : Optional[int]=None , __lowercase : Tuple=None , __lowercase : List[str]=None , __lowercase : Optional[int]=None , __lowercase : Optional[Any]=None , __lowercase : Optional[Any]=None , __lowercase : List[str]=None , __lowercase : Optional[int]=-1 , __lowercase : str=False , ) -> str:
SCREAMING_SNAKE_CASE__ : List[str] =self.num_layers
try:
SCREAMING_SNAKE_CASE__ : List[str] =self.roberta(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , position_ids=__lowercase , head_mask=__lowercase , inputs_embeds=__lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =outputs[1]
SCREAMING_SNAKE_CASE__ : Optional[int] =self.dropout(__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =self.classifier(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =(logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =e.message
SCREAMING_SNAKE_CASE__ : Any =e.exit_layer
SCREAMING_SNAKE_CASE__ : List[Any] =outputs[0]
if not self.training:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =entropy(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =[]
SCREAMING_SNAKE_CASE__ : str =[]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE__ : Optional[int] =MSELoss()
SCREAMING_SNAKE_CASE__ : str =loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE__ : List[Any] =CrossEntropyLoss()
SCREAMING_SNAKE_CASE__ : List[str] =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
SCREAMING_SNAKE_CASE__ : Any =[]
for highway_exit in outputs[-1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =highway_exit[0]
if not self.training:
highway_logits_all.append(__lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE__ : List[str] =MSELoss()
SCREAMING_SNAKE_CASE__ : int =loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE__ : Dict =CrossEntropyLoss()
SCREAMING_SNAKE_CASE__ : Optional[int] =loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__lowercase )
if train_highway:
SCREAMING_SNAKE_CASE__ : str =(sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
SCREAMING_SNAKE_CASE__ : List[str] =(loss,) + outputs
if not self.training:
SCREAMING_SNAKE_CASE__ : Tuple =outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
SCREAMING_SNAKE_CASE__ : str =(
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 152 | 0 |
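The DeeBERT-style highway exits in the snippet above gate inference on the entropy of each exit classifier's logits. A hedged sketch of that criterion; the threshold value is an assumption, and this restates rather than reproduces the imported `entropy` helper:
import torch
def entropy(logits):
    # Shannon entropy of the softmax distribution, one value per example
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)
def should_exit(highway_logits, threshold=0.5):
    # leave the network early once the exit classifier is confident (low entropy)
    return bool((entropy(highway_logits) < threshold).all())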
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase : str = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = ['ViTFeatureExtractor']
_lowercase : Union[str, Any] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
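The `_LazyModule` indirection above means none of the torch/TF/Flax backends load until an attribute is actually touched. A toy illustration of the same deferred-import idea; this is not the actual transformers implementation, and the class name is assumed:
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the submodule is imported only now, on first access
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)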
| 370 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType ( enum.Enum ):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline ( Pipeline ):
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__UpperCAmelCase = None
if self.model.config.prefix is not None:
__UpperCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__UpperCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
__UpperCAmelCase = {**self._preprocess_params, **preprocess_params}
__UpperCAmelCase = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs ):
__UpperCAmelCase = {}
if prefix is not None:
__UpperCAmelCase = prefix
if prefix:
__UpperCAmelCase = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
__UpperCAmelCase = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
''' [None, \'hole\']''' )
__UpperCAmelCase = handle_long_generation
preprocess_params.update(_lowercase )
__UpperCAmelCase = generate_kwargs
__UpperCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
__UpperCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
__UpperCAmelCase = ReturnType.TENSORS
if return_type is not None:
__UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
__UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
__UpperCAmelCase = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
__UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs ):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'''add_space_before_punct_symbol''': True} )
        return super()._parse_and_tokenize(*args , **kwargs )
    def __call__( self , text_inputs , **kwargs ):
        return super().__call__(text_inputs , **kwargs )
    def preprocess( self , prompt_text , prefix="" , handle_long_generation=None , **generate_kwargs ):
__UpperCAmelCase = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
__UpperCAmelCase = prompt_text
if handle_long_generation == "hole":
__UpperCAmelCase = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
__UpperCAmelCase = generate_kwargs['''max_new_tokens''']
else:
__UpperCAmelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__UpperCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        '''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'''
                        ''' model's max length''' )
__UpperCAmelCase = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
__UpperCAmelCase = inputs['''attention_mask'''][:, -keep_length:]
return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
__UpperCAmelCase = model_inputs['''input_ids''']
__UpperCAmelCase = model_inputs.get('''attention_mask''' , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = 1
else:
__UpperCAmelCase = input_ids.shape[0]
__UpperCAmelCase = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__UpperCAmelCase = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
__UpperCAmelCase = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
__UpperCAmelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__UpperCAmelCase = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__UpperCAmelCase = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
__UpperCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
__UpperCAmelCase = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__UpperCAmelCase = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
__UpperCAmelCase = model_outputs['''generated_sequence'''][0]
__UpperCAmelCase = model_outputs['''input_ids''']
__UpperCAmelCase = model_outputs['''prompt_text''']
__UpperCAmelCase = generated_sequence.numpy().tolist()
__UpperCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__UpperCAmelCase = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__UpperCAmelCase = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__UpperCAmelCase = 0
else:
__UpperCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
__UpperCAmelCase = prompt_text + text[prompt_length:]
else:
__UpperCAmelCase = text[prompt_length:]
__UpperCAmelCase = {'''generated_text''': all_text}
records.append(_lowercase )
return records
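A short usage sketch of the pipeline defined above, via the public `pipeline` factory; the model choice is an assumption:
from transformers import pipeline
generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "Once upon a time",
    max_new_tokens=20,
    return_full_text=False,  # ReturnType.NEW_TEXT: strip the prompt from the output
)
print(outputs[0]["generated_text"])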
| 86 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] ,lowercase_ : Dict ,lowercase_ : Dict=7 ,lowercase_ : Optional[int]=3 ,lowercase_ : int=3_0 ,lowercase_ : Optional[Any]=4_0_0 ,lowercase_ : Any=True ,lowercase_ : List[str]=None ,lowercase_ : str=True ,lowercase_ : List[Any]=[0.5, 0.5, 0.5] ,lowercase_ : List[str]=[0.5, 0.5, 0.5] ,lowercase_ : Any=True ,lowercase_ : Union[str, Any]=1 / 2_5_5 ,lowercase_ : str=True ,):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
lowerCAmelCase__ : Any = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : List[str] = num_channels
lowerCAmelCase__ : Optional[Any] = min_resolution
lowerCAmelCase__ : Union[str, Any] = max_resolution
lowerCAmelCase__ : Optional[int] = do_resize
lowerCAmelCase__ : str = size
lowerCAmelCase__ : Union[str, Any] = do_normalize
lowerCAmelCase__ : List[str] = image_mean
lowerCAmelCase__ : str = image_std
lowerCAmelCase__ : Optional[Any] = do_rescale
lowerCAmelCase__ : Union[str, Any] = rescale_factor
lowerCAmelCase__ : Optional[Any] = do_pad
def __lowerCAmelCase ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __lowerCAmelCase ( self : List[str] ,lowercase_ : List[Any] ,lowercase_ : int=False ):
if not batched:
lowerCAmelCase__ : Tuple = image_inputs[0]
if isinstance(lowercase_ ,Image.Image ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = image.size
else:
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase__ : Any = int(self.size['''shortest_edge'''] * h / w )
lowerCAmelCase__ : str = self.size['''shortest_edge''']
elif w > h:
lowerCAmelCase__ : Union[str, Any] = self.size['''shortest_edge''']
lowerCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCAmelCase__ : List[str] = self.size['''shortest_edge''']
lowerCAmelCase__ : str = self.size['''shortest_edge''']
else:
lowerCAmelCase__ : Optional[Any] = []
for image in image_inputs:
lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase__ : List[str] = max(lowercase_ ,key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase__ : Any = max(lowercase_ ,key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = DetaImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[Any] = DetaImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ ,'''image_mean''' ) )
self.assertTrue(hasattr(lowercase_ ,'''image_std''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_normalize''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_resize''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_rescale''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_pad''' ) )
self.assertTrue(hasattr(lowercase_ ,'''size''' ) )
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad ,lowercase_ )
def __lowerCAmelCase ( self : List[str] ):
pass
def __lowerCAmelCase ( self : Union[str, Any] ):
# Initialize image_processing
lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,Image.Image )
# Test not batched input
lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(lowercase_ ,batched=lowercase_ )
lowerCAmelCase__ : Optional[int] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowerCAmelCase ( self : Dict ):
# Initialize image_processing
lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,np.ndarray )
# Test not batched input
lowerCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ : str = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(lowercase_ ,batched=lowercase_ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowerCAmelCase ( self : Union[str, Any] ):
# Initialize image_processing
lowerCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,torch.Tensor )
# Test not batched input
lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ : str = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(lowercase_ ,batched=lowercase_ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __lowerCAmelCase ( self : Tuple ):
# prepare image and target
lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' ,'''r''' ) as f:
lowerCAmelCase__ : Union[str, Any] = json.loads(f.read() )
lowerCAmelCase__ : str = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowerCAmelCase__ : Optional[Any] = DetaImageProcessor()
lowerCAmelCase__ : Optional[int] = image_processing(images=lowercase_ ,annotations=lowercase_ ,return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase__ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape ,lowercase_ )
lowerCAmelCase__ : Any = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] ,lowercase_ ,atol=1E-4 ) )
# verify area
lowerCAmelCase__ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] ,lowercase_ ) )
# verify boxes
lowerCAmelCase__ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape ,lowercase_ )
lowerCAmelCase__ : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] ,lowercase_ ,atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ : Optional[int] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] ,lowercase_ ) )
# verify is_crowd
lowerCAmelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] ,lowercase_ ) )
# verify class_labels
lowerCAmelCase__ : Any = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] ,lowercase_ ) )
# verify orig_size
lowerCAmelCase__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] ,lowercase_ ) )
# verify size
lowerCAmelCase__ : str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] ,lowercase_ ) )
@slow
def __lowerCAmelCase ( self : Any ):
# prepare image, target and masks_path
lowerCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' ,'''r''' ) as f:
lowerCAmelCase__ : str = json.loads(f.read() )
lowerCAmelCase__ : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowerCAmelCase__ : Optional[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCAmelCase__ : str = DetaImageProcessor(format='''coco_panoptic''' )
lowerCAmelCase__ : Optional[int] = image_processing(images=lowercase_ ,annotations=lowercase_ ,masks_path=lowercase_ ,return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase__ : Any = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape ,lowercase_ )
lowerCAmelCase__ : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] ,lowercase_ ,atol=1E-4 ) )
# verify area
lowerCAmelCase__ : Tuple = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] ,lowercase_ ) )
# verify boxes
lowerCAmelCase__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape ,lowercase_ )
lowerCAmelCase__ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] ,lowercase_ ,atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] ,lowercase_ ) )
# verify is_crowd
lowerCAmelCase__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] ,lowercase_ ) )
# verify class_labels
lowerCAmelCase__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] ,lowercase_ ) )
# verify masks
lowerCAmelCase__ : Optional[int] = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() ,lowercase_ )
# verify orig_size
lowerCAmelCase__ : List[Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] ,lowercase_ ) )
# verify size
lowerCAmelCase__ : Optional[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] ,lowercase_ ) )
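The slow tests above feed COCO-style detection targets to the processor; a minimal sketch of the annotation payload shape (the bbox, area, and category values here are illustrative, not the fixture's contents):
from PIL import Image
from transformers import DetaImageProcessor
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # path from the test above
target = {
    "image_id": 39769,
    "annotations": [
        {"bbox": [13.0, 22.7, 535.0, 395.0], "category_id": 17, "area": 53229.0, "iscrowd": 0},  # [x, y, w, h] in pixels
    ],
}
encoding = DetaImageProcessor()(images=image, annotations=target, return_tensors="pt")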
| 106 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class lowerCamelCase_ ( Dataset ):
'''simple docstring'''
def __init__( self : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any]="train" , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Optional[int]="" , ):
super().__init__()
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase ).joinpath(type_path + '.source' )
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase ).joinpath(type_path + '.target' )
SCREAMING_SNAKE_CASE_ = self.get_char_lens(self.src_file )
SCREAMING_SNAKE_CASE_ = max_source_length
SCREAMING_SNAKE_CASE_ = max_target_length
assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
SCREAMING_SNAKE_CASE_ = tokenizer
SCREAMING_SNAKE_CASE_ = prefix
if n_obs is not None:
SCREAMING_SNAKE_CASE_ = self.src_lens[:n_obs]
SCREAMING_SNAKE_CASE_ = src_lang
SCREAMING_SNAKE_CASE_ = tgt_lang
def __len__( self : Tuple ):
return len(self.src_lens )
def __getitem__( self : List[str] , _lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = index + 1 # linecache starts at 1
SCREAMING_SNAKE_CASE_ = self.prefix + linecache.getline(str(self.src_file ) , _lowerCAmelCase ).rstrip('\n' )
SCREAMING_SNAKE_CASE_ = linecache.getline(str(self.tgt_file ) , _lowerCAmelCase ).rstrip('\n' )
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _lowerCAmelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
SCREAMING_SNAKE_CASE_ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _lowerCAmelCase ) else self.tokenizer
)
SCREAMING_SNAKE_CASE_ = self.tokenizer.generator if isinstance(self.tokenizer , _lowerCAmelCase ) else self.tokenizer
SCREAMING_SNAKE_CASE_ = encode_line(_lowerCAmelCase , _lowerCAmelCase , self.max_source_length , 'right' )
SCREAMING_SNAKE_CASE_ = encode_line(_lowerCAmelCase , _lowerCAmelCase , self.max_target_length , 'right' )
SCREAMING_SNAKE_CASE_ = source_inputs['input_ids'].squeeze()
SCREAMING_SNAKE_CASE_ = target_inputs['input_ids'].squeeze()
SCREAMING_SNAKE_CASE_ = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowerCAmelCase_ ( _lowerCAmelCase : Optional[int] ):
return [len(_lowerCAmelCase ) for x in Path(_lowerCAmelCase ).open().readlines()]
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_ = torch.stack([x['input_ids'] for x in batch] )
SCREAMING_SNAKE_CASE_ = torch.stack([x['attention_mask'] for x in batch] )
SCREAMING_SNAKE_CASE_ = torch.stack([x['decoder_input_ids'] for x in batch] )
SCREAMING_SNAKE_CASE_ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _lowerCAmelCase )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE_ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _lowerCAmelCase )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE_ = trim_batch(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = trim_batch(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
lowerCamelCase__ : List[str] = getLogger(__name__)
def UpperCAmelCase_ ( __UpperCAmelCase : List[List] ) -> Tuple:
return list(itertools.chain.from_iterable(__UpperCAmelCase ) )
def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, 'git_log.json' ) )
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, 'w' ) as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> int:
with open(__UpperCAmelCase ) as f:
return json.load(__UpperCAmelCase )
def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def UpperCAmelCase_ ( __UpperCAmelCase : Callable , __UpperCAmelCase : Iterable ) -> List:
return list(map(__UpperCAmelCase , __UpperCAmelCase ) )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Dict ) -> Dict:
with open(__UpperCAmelCase , 'wb' ) as f:
return pickle.dump(__UpperCAmelCase , __UpperCAmelCase )
def normalize_answer(text):
    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b' , ' ' , text )
    def white_space_fix(text):
        return " ".join(text.split() )
    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith('rag' )
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 225 | 0 |
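Illustrative use of the scoring helpers defined in the snippet above; the sample strings are assumptions:
pred = "The cat sat on the mat."
gold = "a cat sat on the mat"
print(f1_score(pred, gold), exact_match_score(pred, gold))  # prints: 1.0 True, since both normalize to "cat sat on mat"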
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : List[str] = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = ['PoolFormerFeatureExtractor']
a_ : Optional[Any] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
a_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 327 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
a_ : Optional[int] = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
a_ : Optional[int] = {
'169M': 7_68,
'430M': 10_24,
'1B5': 20_48,
'3B': 25_60,
'7B': 40_96,
'14B': 51_20,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.' , 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , name)
        # ffn -> feed_forward
        name = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , name)
        # time_mix_k -> time_mix_key
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k' , '.time_mix_key')
        # time_mix_v -> time_mix_value
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v' , '.time_mix_value')
        # time_mix_r -> time_mix_receptance
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r' , '.time_mix_receptance')
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.')
SCREAMING_SNAKE_CASE = 5_0277
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
else:
SCREAMING_SNAKE_CASE = PreTrainedTokenizerFast(tokenizer_file=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
tokenizer.save_pretrained(_UpperCAmelCase)
# 2. Build the config
SCREAMING_SNAKE_CASE = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
SCREAMING_SNAKE_CASE = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
if size not in possible_sizes:
raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''')
SCREAMING_SNAKE_CASE = RwkvConfig(
vocab_size=_UpperCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_UpperCAmelCase)
# 3. Download model file then convert state_dict
SCREAMING_SNAKE_CASE = hf_hub_download(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = torch.load(_UpperCAmelCase , map_location='cpu')
SCREAMING_SNAKE_CASE = convert_state_dict(_UpperCAmelCase)
# 4. Split in shards and save
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = shard_checkpoint(_UpperCAmelCase)
for shard_file, shard in shards.items():
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase))
if index is not None:
SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , _UpperCAmelCase)
# Save the index as well
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as f:
SCREAMING_SNAKE_CASE = json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase) + '\n'
f.write(_UpperCAmelCase)
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.')
SCREAMING_SNAKE_CASE = list(shards.keys())
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
SCREAMING_SNAKE_CASE = torch.load(os.path.join(_UpperCAmelCase , _UpperCAmelCase))
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_UpperCAmelCase , _UpperCAmelCase))
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.')
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase)
model.push_to_hub(_UpperCAmelCase , max_shard_size='2GB')
tokenizer.push_to_hub(_UpperCAmelCase)
if __name__ == "__main__":
a_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
a_ : Tuple = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 327 | 1 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowercase = logging.getLogger(__name__)
_lowercase = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(_lowercase )} , )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''The input training data file (a text file).'''} )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
_lowerCamelCase: bool = field(
default=_lowercase , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
_lowerCamelCase: bool = field(
default=_lowercase , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
_lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
_lowerCamelCase: float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
_lowerCamelCase: float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
_lowerCamelCase: int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
_lowerCamelCase: int = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
_lowerCamelCase: bool = field(
default=_lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _snake_case ( snake_case__ : DataTrainingArguments , snake_case__ : PreTrainedTokenizer , snake_case__ : bool = False , snake_case__ : Optional[str] = None , ):
def _dataset(snake_case__ : Tuple , snake_case__ : Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=snake_case__ , file_path=snake_case__ , block_size=args.block_size , ref_path=snake_case__ , )
return LineByLineTextDataset(tokenizer=snake_case__ , file_path=snake_case__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=snake_case__ , file_path=snake_case__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=snake_case__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(snake_case__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A , A , A = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , snake_case__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
A = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
A = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
A = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
A = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
A = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it,and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
A = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
A = AutoModelWithLMHead.from_config(snake_case__ )
model.resize_token_embeddings(len(snake_case__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
A = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
A = min(data_args.block_size , tokenizer.max_len )
# Get datasets
A = (
get_dataset(snake_case__ , tokenizer=snake_case__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
A = (
get_dataset(snake_case__ , tokenizer=snake_case__ , evaluate=snake_case__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
A = DataCollatorForPermutationLanguageModeling(
tokenizer=snake_case__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
A = DataCollatorForWholeWordMask(
tokenizer=snake_case__ , mlm_probability=data_args.mlm_probability )
else:
A = DataCollatorForLanguageModeling(
tokenizer=snake_case__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A = Trainer(
model=snake_case__ , args=snake_case__ , data_collator=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , prediction_loss_only=snake_case__ , )
# Training
if training_args.do_train:
A = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=snake_case__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate()
A = math.exp(eval_output['eval_loss'] )
A = {'perplexity': perplexity}
A = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(snake_case__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , snake_case__ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(snake_case__ )
return results
def _snake_case ( snake_case__ : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 74 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : Any = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Any:
snake_case : Optional[int] = WavaVecaForSequenceClassification.from_pretrained(lowercase ,config=lowercase )
snake_case : List[str] = downstream_dict["""projector.weight"""]
snake_case : Dict = downstream_dict["""projector.bias"""]
snake_case : Dict = downstream_dict["""model.post_net.linear.weight"""]
snake_case : List[Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> List[str]:
snake_case : str = WavaVecaForAudioFrameClassification.from_pretrained(lowercase ,config=lowercase )
snake_case : List[Any] = downstream_dict["""model.linear.weight"""]
snake_case : str = downstream_dict["""model.linear.bias"""]
return model
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> str:
snake_case : Any = WavaVecaForXVector.from_pretrained(lowercase ,config=lowercase )
snake_case : str = downstream_dict["""connector.weight"""]
snake_case : Optional[Any] = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case : List[Any] = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
snake_case : Optional[int] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
snake_case : List[str] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
snake_case : Union[str, Any] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
snake_case : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
snake_case : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
snake_case : Any = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
snake_case : Tuple = torch.load(lowercase ,map_location="""cpu""" )
snake_case : Any = checkpoint["""Downstream"""]
snake_case : List[str] = WavaVecaConfig.from_pretrained(lowercase )
snake_case : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
lowercase ,return_attention_mask=lowercase ,do_normalize=lowercase )
snake_case : str = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case : int = convert_classification(lowercase ,lowercase ,lowercase )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case : Dict = convert_diarization(lowercase ,lowercase ,lowercase )
elif arch.endswith("""ForXVector""" ):
snake_case : Optional[Any] = convert_xvector(lowercase ,lowercase ,lowercase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
snake_case : List[str] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
lowerCamelCase : int = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 124 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_ : Optional[Any] = logging.get_logger(__name__)
class a ( a__ ):
snake_case__ = ['''pixel_values''']
def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PIL.Image.BICUBIC , _snake_case = True , _snake_case = None , _snake_case = 1 / 2_55 , _snake_case = True , _snake_case = True , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = size if size is not None else {'height': 2_56, 'width': 2_56}
lowerCAmelCase = get_size_dict(_snake_case )
lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase = get_size_dict(_snake_case , param_name='crop_size' )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = PIL.Image.BICUBIC , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return resize(
_snake_case , size=(size['height'], size['width']) , resample=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(_snake_case , size=(size['height'], size['width']) , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case=None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(_snake_case )
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(_snake_case , param_name='crop_size' )
lowerCAmelCase = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
if do_center_crop:
lowerCAmelCase = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
| 357 |
"""simple docstring"""
__UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
order.append(_UpperCAmelCase )
return order
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return component
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ):
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_UpperCAmelCase )
lowerCAmelCase = []
for i, was_visited in enumerate(_UpperCAmelCase ):
if not was_visited:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = []
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1]
if not visited[vert]:
lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
components_list.append(_UpperCAmelCase )
return components_list
| 309 | 0 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( SCREAMING_SNAKE_CASE__ ):
@staticmethod
def A__ ( A__):
lowercase = parser.add_parser('''env''')
download_parser.set_defaults(func=A__)
def A__ ( self):
lowercase = huggingface_hub.__version__
lowercase = '''not installed'''
lowercase = '''NA'''
if is_torch_available():
import torch
lowercase = torch.__version__
lowercase = torch.cuda.is_available()
lowercase = '''not installed'''
if is_transformers_available():
import transformers
lowercase = transformers.__version__
lowercase = '''not installed'''
if is_accelerate_available():
import accelerate
lowercase = accelerate.__version__
lowercase = '''not installed'''
if is_xformers_available():
import xformers
lowercase = xformers.__version__
lowercase = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f'{pt_version} ({pt_cuda_available})',
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''')
print(self.format_dict(A__))
return info
@staticmethod
def A__ ( A__):
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()]) + "\n"
| 101 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A: List[str] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Tuple = ["DPTFeatureExtractor"]
A: int = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[Any] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A: str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
UpperCamelCase__ : Optional[int] = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[str]:
"""simple docstring"""
a = np.argmax(snake_case_, axis=1 )
return np.sum(outputs == labels )
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
"""simple docstring"""
with open(snake_case_, encoding='''utf_8''' ) as f:
a = csv.reader(snake_case_ )
a = []
next(snake_case_ ) # skip the first line
for line in tqdm(snake_case_ ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]:
"""simple docstring"""
a = []
for dataset in encoded_datasets:
a = len(snake_case_ )
a = np.zeros((n_batch, 2, input_len), dtype=np.intaa )
a = np.zeros((n_batch, 2), dtype=np.intaa )
a = np.full((n_batch, 2, input_len), fill_value=-1_0_0, dtype=np.intaa )
a = np.zeros((n_batch,), dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(snake_case_ ):
a = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
a = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
a = with_conta
a = with_conta
a = len(snake_case_ ) - 1
a = len(snake_case_ ) - 1
a = with_conta
a = with_conta
a = mc_label
a = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(snake_case_ ) for t in all_inputs ) )
return tensor_datasets
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
a = argparse.ArgumentParser()
parser.add_argument('''--model_name''', type=snake_case_, default='''openai-gpt''', help='''pretrained model name''' )
parser.add_argument('''--do_train''', action='''store_true''', help='''Whether to run training.''' )
parser.add_argument('''--do_eval''', action='''store_true''', help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''', default=snake_case_, type=snake_case_, required=snake_case_, help='''The output directory where the model predictions and checkpoints will be written.''', )
parser.add_argument('''--train_dataset''', type=snake_case_, default='''''' )
parser.add_argument('''--eval_dataset''', type=snake_case_, default='''''' )
parser.add_argument('''--seed''', type=snake_case_, default=4_2 )
parser.add_argument('''--num_train_epochs''', type=snake_case_, default=3 )
parser.add_argument('''--train_batch_size''', type=snake_case_, default=8 )
parser.add_argument('''--eval_batch_size''', type=snake_case_, default=1_6 )
parser.add_argument('''--adam_epsilon''', default=1e-8, type=snake_case_, help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''', type=snake_case_, default=1 )
parser.add_argument(
'''--max_steps''', default=-1, type=snake_case_, help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
), )
parser.add_argument(
'''--gradient_accumulation_steps''', type=snake_case_, default=1, help='''Number of updates steps to accumulate before performing a backward/update pass.''', )
parser.add_argument('''--learning_rate''', type=snake_case_, default=6.25e-5 )
parser.add_argument('''--warmup_steps''', default=0, type=snake_case_, help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''', type=snake_case_, default='''warmup_linear''' )
parser.add_argument('''--weight_decay''', type=snake_case_, default=0.01 )
parser.add_argument('''--lm_coef''', type=snake_case_, default=0.9 )
parser.add_argument('''--n_valid''', type=snake_case_, default=3_7_4 )
parser.add_argument('''--server_ip''', type=snake_case_, default='''''', help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''', type=snake_case_, default='''''', help='''Can be used for distant debugging.''' )
a = parser.parse_args()
print(snake_case_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=snake_case_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
a = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
a = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(snake_case_, snake_case_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
a = ['''_start_''', '''_delimiter_''', '''_classify_''']
a = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(snake_case_ )
a = tokenizer.convert_tokens_to_ids(snake_case_ )
a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(snake_case_ ) )
model.to(snake_case_ )
# Load and encode the datasets
def tokenize_and_encode(snake_case_ ):
if isinstance(snake_case_, snake_case_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(snake_case_ ) )
elif isinstance(snake_case_, snake_case_ ):
return obj
return [tokenize_and_encode(snake_case_ ) for o in obj]
logger.info('''Encoding dataset...''' )
a = load_rocstories_dataset(args.train_dataset )
a = load_rocstories_dataset(args.eval_dataset )
a = (train_dataset, eval_dataset)
a = tokenize_and_encode(snake_case_ )
# Compute the max input length for the Transformer
a = model.config.n_positions // 2 - 2
a = max(
len(story[:max_length] ) + max(len(conta[:max_length] ), len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
a = min(snake_case_, model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
a = pre_process_datasets(snake_case_, snake_case_, snake_case_, *snake_case_ )
a , a = tensor_datasets[0], tensor_datasets[1]
a = TensorDataset(*snake_case_ )
a = RandomSampler(snake_case_ )
a = DataLoader(snake_case_, sampler=snake_case_, batch_size=args.train_batch_size )
a = TensorDataset(*snake_case_ )
a = SequentialSampler(snake_case_ )
a = DataLoader(snake_case_, sampler=snake_case_, batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
a = args.max_steps
a = args.max_steps // (len(snake_case_ ) // args.gradient_accumulation_steps) + 1
else:
a = len(snake_case_ ) // args.gradient_accumulation_steps * args.num_train_epochs
a = list(model.named_parameters() )
a = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
a = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
a = AdamW(snake_case_, lr=args.learning_rate, eps=args.adam_epsilon )
a = get_linear_schedule_with_warmup(
snake_case_, num_warmup_steps=args.warmup_steps, num_training_steps=snake_case_ )
if args.do_train:
a , a , a = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ), desc='''Epoch''' ):
a = 0
a = 0
a = tqdm(snake_case_, desc='''Training''' )
for step, batch in enumerate(snake_case_ ):
a = tuple(t.to(snake_case_ ) for t in batch )
a , a , a , a = batch
a = model(snake_case_, mc_token_ids=snake_case_, lm_labels=snake_case_, mc_labels=snake_case_ )
a = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
a = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
a = '''Training loss: {:.2e} lr: {:.2e}'''.format(snake_case_, scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
a = model.module if hasattr(snake_case_, '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
a = os.path.join(args.output_dir, snake_case_ )
a = os.path.join(args.output_dir, snake_case_ )
torch.save(model_to_save.state_dict(), snake_case_ )
model_to_save.config.to_json_file(snake_case_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
a = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(snake_case_ )
if args.do_eval:
model.eval()
a , a = 0, 0
a , a = 0, 0
for batch in tqdm(snake_case_, desc='''Evaluating''' ):
a = tuple(t.to(snake_case_ ) for t in batch )
a , a , a , a = batch
with torch.no_grad():
a , a , a , a = model(
snake_case_, mc_token_ids=snake_case_, lm_labels=snake_case_, mc_labels=snake_case_ )
a = mc_logits.detach().cpu().numpy()
a = mc_labels.to('''cpu''' ).numpy()
a = accuracy(snake_case_, snake_case_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
a = eval_loss / nb_eval_steps
a = eval_accuracy / nb_eval_examples
a = tr_loss / nb_tr_steps if args.do_train else None
a = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
a = os.path.join(args.output_dir, '''eval_results.txt''' )
with open(snake_case_, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''', snake_case_, str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 330 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> str | Literal[False]:
"""simple docstring"""
a = list(snake_case_ )
a = list(snake_case_ )
a = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
a = '''_'''
if count > 1:
return False
else:
return "".join(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> list[str]:
"""simple docstring"""
a = []
while True:
a = ['''$'''] * len(snake_case_ )
a = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1, len(snake_case_ ) ):
a = compare_string(binary[i], binary[j] )
if k is False:
a = '''*'''
a = '''*'''
temp.append('''X''' )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
a = list(set(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]:
"""simple docstring"""
a = []
for minterm in minterms:
a = ''''''
for _ in range(snake_case_ ):
a = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> bool:
"""simple docstring"""
a = list(snake_case_ )
a = list(snake_case_ )
a = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]:
"""simple docstring"""
a = []
a = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
a = 0
a = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
a = j
if count == 1:
a = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
a = 0
temp.append(prime_implicants[i] )
while True:
a = 0
a = -1
a = 0
for i in range(len(snake_case_ ) ):
a = chart[i].count(1 )
if count_n > max_n:
a = count_n
a = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
a = 0
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[list[int]]:
"""simple docstring"""
a = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
a = prime_implicants[i].count('''_''' )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i], binary[j], snake_case_ ):
a = 1
return chart
def SCREAMING_SNAKE_CASE__ ( ) -> None:
"""simple docstring"""
a = int(input('''Enter the no. of variables\n''' ) )
a = [
float(snake_case_ )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
a = decimal_to_binary(snake_case_, snake_case_ )
a = check(snake_case_ )
print('''Prime Implicants are:''' )
print(snake_case_ )
a = prime_implicant_chart(snake_case_, snake_case_ )
a = selection(snake_case_, snake_case_ )
print('''Essential Prime Implicants are:''' )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 330 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase__ : int = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
lowerCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Tuple:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=__UpperCamelCase , )
assert hasattr(self , 'env' )
def __a ( self , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Tuple = {
'enabled': True,
'processes_per_host': 8,
}
snake_case__ : Any = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
snake_case__ : Optional[int] = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
snake_case__ : int = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCamelCase , hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} , metric_definitions=self.env.metric_definitions , distribution=__UpperCamelCase , py_version='py36' , )
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
TrainingJobAnalytics(__UpperCamelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = self.create_estimator(__UpperCamelCase )
# run training
estimator.fit()
# result dataframe
snake_case__ : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case__ : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
snake_case__ : List[str] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
snake_case__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __UpperCamelCase )
| 143 | 1 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__UpperCAmelCase : Dict = Vector()
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCamelCase ) , 4 )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : int = Vector([1, 2] )
__UpperCAmelCase : Optional[Any] = Vector([1, 2, 3, 4, 5] )
__UpperCAmelCase : Optional[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__UpperCAmelCase : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = Vector([1, 2, 3] )
__UpperCAmelCase : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = Vector([1, 2, 3] )
__UpperCAmelCase : List[str] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : int = Vector([1, 2, 3] )
__UpperCAmelCase : Any = Vector([2, -1, 4] ) # for test of dot product
__UpperCAmelCase : int = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = Vector([1, 2, 3] )
__UpperCAmelCase : Dict = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCamelCase , UpperCamelCase ) ) , """(3,4,7)""" )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = Vector([1, 0, 0, 0, 0, 0] )
__UpperCAmelCase : str = x.copy()
self.assertEqual(str(UpperCamelCase ) , str(UpperCamelCase ) )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCamelCase ) , """(0,1,0)""" )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCamelCase ) )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCAmelCase : Optional[int] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCamelCase , UpperCamelCase ) )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCAmelCase : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCamelCase , UpperCamelCase ) )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Tuple = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__UpperCAmelCase : Optional[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCamelCase ) )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCAmelCase : Tuple = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCAmelCase : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 358 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
pass
def lowerCamelCase ( _UpperCamelCase : Image ) -> str:
'''simple docstring'''
__UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() )
return m.hexdigest()[:1_0]
def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Tuple = np.array(_UpperCamelCase )
__UpperCAmelCase : List[Any] = npimg.shape
return {"hash": hashimage(_UpperCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__a = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__a = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
__UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
__UpperCAmelCase : int = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = """facebook/sam-vit-huge"""
__UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase )
__UpperCAmelCase : int = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__UpperCAmelCase : Dict = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] , )
| 320 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __A ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls ):
"""simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def __lowercase ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def __lowercase ( self ):
"""simple docstring"""
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='test-feature-extractor' , push_to_hub=True , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
def __lowercase ( self ):
"""simple docstring"""
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=True , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
def __lowercase ( self ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 71 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')
    print(F'Save vocab file to {pytorch_vocab_dump_path}' )
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')
if __name__ == "__main__":
A_ :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A_ :List[Any] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 71 | 1 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook( method ):
'''simple docstring'''
if not is_accelerate_available():
return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('0.17.0' ):
return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
return wrapper
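
# A minimal usage sketch (TinyEncoder below is illustrative, not part of the original
# file): the decorator targets methods of modules that accelerate may have equipped
# with an offload `_hf_hook`, so weights are placed on the right device before the call.
import torch

class TinyEncoder(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(4 , 4)

    @apply_forward_hook
    def encode(self , x):
        # If accelerate attached a hook, its pre_forward ran before this line.
        return self.proj(x)

encoded = TinyEncoder().encode(torch.randn(1 , 4))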
| 200 |
def mf_knapsack( i , wt , val , j ):
    '''simple docstring'''
    global f # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1 , wt , val , j )
        else:
            val_ = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val_
    return f[i][j]
def knapsack( w , wt , val , n ):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
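
# The table above implements the classic 0/1 knapsack recurrence:
#   dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_]) when item i fits,
#   dp[i][w_] = dp[i - 1][w_] otherwise.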
def knapsack_with_example_solution( w , wt , val ):
    '''simple docstring'''
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F'''But got {num_items} weights and {len(val )} values'''
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                'All weights must be integers but got weight of '
                F'''type {type(wt[i] )} at index {i}'''
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution( dp , wt , i , j , optimal_set ):
    '''simple docstring'''
    # item i belongs to an optimal subset exactly when removing it changes the optimum
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w)) # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 200 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASKaFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class MaskaFormerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__( self, backbone_config = None, feature_size = 256, mask_feature_size = 256, hidden_dim = 256, encoder_feedforward_dim = 1024, activation_function = "relu", encoder_layers = 6, decoder_layers = 10, num_attention_heads = 8, dropout = 0.0, dim_feedforward = 2048, pre_norm = False, enforce_input_projection = False, common_stride = 4, ignore_value = 255, num_queries = 100, no_object_weight = 0.1, class_weight = 2.0, mask_weight = 5.0, dice_weight = 5.0, train_num_points = 1_2544, oversample_ratio = 3.0, importance_sample_ratio = 0.75, init_std = 0.02, init_xavier_std = 1.0, use_auxiliary_loss = True, feature_strides = [4, 8, 16, 32], output_auxiliary_logits = None, **kwargs, ):
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
A : Tuple = CONFIG_MAPPING["""swin"""](
image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=lowerCamelCase__, out_features=["""stage1""", """stage2""", """stage3""", """stage4"""], )
        if isinstance(backbone_config, dict ):
            backbone_model_type = backbone_config.pop("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {",".join(self.backbones_supported )}''' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_config( cls, backbone_config, **kwargs ):
        return cls(
            backbone_config=backbone_config, **kwargs, )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
| 116 |
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImgaImgPipeline( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["torch", "transformers", "onnx"]

    def __init__( self, *args, **kwargs ):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )


class OnnxStableDiffusionInpaintPipeline( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["torch", "transformers", "onnx"]

    def __init__( self, *args, **kwargs ):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )


class OnnxStableDiffusionInpaintPipelineLegacy( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["torch", "transformers", "onnx"]

    def __init__( self, *args, **kwargs ):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )


class OnnxStableDiffusionPipeline( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["torch", "transformers", "onnx"]

    def __init__( self, *args, **kwargs ):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )


class OnnxStableDiffusionUpscalePipeline( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["torch", "transformers", "onnx"]

    def __init__( self, *args, **kwargs ):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )


class StableDiffusionOnnxPipeline( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["torch", "transformers", "onnx"]

    def __init__( self, *args, **kwargs ):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""] )
| 116 | 1 |
from __future__ import annotations
import math
def prime_sieve( num ):
    if num <= 0:
        msg = f'''{num}: Invalid input, please enter a positive integer.'''
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
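
# Complexity note: the sieve of Eratosthenes above runs in O(n log log n) time and
# uses O(n) extra space for the boolean table.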
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 201 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _A ( __magic_name__=32 , __magic_name__=10 , __magic_name__=100 , __magic_name__=1026 , __magic_name__=True , __magic_name__="data/tokenized_stories_train_wikitext103.jbl" , __magic_name__="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
lowercase__ , lowercase__ = generate_datasets(
__magic_name__ , __magic_name__ , number=__magic_name__ , min_len=1026 , trim=__magic_name__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowercase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
lowercase__ = load_gpta("gpt2" ).to(__magic_name__ )
print("computing perplexity on objective set" )
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ ).item()
print("perplexity on objective set:" , __magic_name__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _A ( __magic_name__ , __magic_name__=15 , __magic_name__=128 , __magic_name__=100 , __magic_name__="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
lowercase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
lowercase__ = SecondaryLearner(__magic_name__ )
# Train secondary learner
lowercase__ = train_secondary_learner(
__magic_name__ , __magic_name__ , max_epochs=__magic_name__ , batch_size=__magic_name__ , eval_freq=100 , igf_model_path=__magic_name__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=32 , __magic_name__=1000 , __magic_name__=16 , __magic_name__=1.0 , __magic_name__=recopy_gpta , __magic_name__=None , __magic_name__=10 , __magic_name__="gpt2_finetuned.pt" , ):
lowercase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
lowercase__ = RandomSampler(__magic_name__ )
lowercase__ = DataLoader(__magic_name__ , sampler=__magic_name__ )
lowercase__ = max_steps // (len(__magic_name__ )) + 1
lowercase__ = 0
lowercase__ = torch.zeros((1, context_len) , dtype=torch.long , device=__magic_name__ )
lowercase__ , lowercase__ , lowercase__ = recopy_model(__magic_name__ , __magic_name__ , __magic_name__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(__magic_name__ )
secondary_learner.eval()
lowercase__ = []
lowercase__ = 0
lowercase__ = []
lowercase__ = []
# Compute the performance of the transformer model at the beginning
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print("Test perplexity, step" , __magic_name__ , ":" , __magic_name__ )
for epoch in range(int(__magic_name__ ) ):
for step, example in enumerate(__magic_name__ ):
torch.cuda.empty_cache()
lowercase__ = random.randint(0 , example.size(2 ) - context_len - 1 )
lowercase__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowercase__ = model(__magic_name__ , labels=__magic_name__ )
lowercase__ = True
if secondary_learner is not None:
lowercase__ = secondary_learner.forward(
torch.tensor(__magic_name__ , dtype=torch.long , device=__magic_name__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__magic_name__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowercase__ = -1
if predicted_q < threshold:
lowercase__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowercase__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowercase__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print("Test perplexity, step" , __magic_name__ , ":" , __magic_name__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __magic_name__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _A ( ):
lowercase__ = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=__magic_name__ , default=__magic_name__ , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=__magic_name__ , default=__magic_name__ , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=__magic_name__ , type=__magic_name__ , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=__magic_name__ , default=__magic_name__ , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=__magic_name__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=__magic_name__ , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=__magic_name__ , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=__magic_name__ , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=__magic_name__ , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=__magic_name__ , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=__magic_name__ , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=__magic_name__ , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=__magic_name__ , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=__magic_name__ , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=__magic_name__ , type=__magic_name__ , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=__magic_name__ , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=__magic_name__ , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=__magic_name__ , type=__magic_name__ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__magic_name__ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
lowercase__ = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
lowercase__ = training_secondary_learner(
__magic_name__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
lowercase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowercase__ , lowercase__ = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=__magic_name__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__magic_name__ , __magic_name__ , __magic_name__ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__magic_name__ , secondary_learner=__magic_name__ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 201 | 1 |
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__( self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch( self , input_file):
        self.img = cv2.imread(input_file , 0)
        self.original_image = copy.deepcopy(self.img)
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("""output_data/output.jpg""" , self.img)

    def plot_histogram( self):
        plt.hist(self.img.ravel() , 256 , [0, 256])

    def show_image( self):
        cv2.imshow("""Output-Image""" , self.img)
        cv2.imshow("""Input-Image""" , self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
__lowercase = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
__lowercase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 272 | '''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = '''T5Config'''
class TFMTaModel( TFTaModel ):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MTaConfig


class TFMTaForConditionalGeneration( TFTaForConditionalGeneration ):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MTaConfig


class TFMTaEncoderModel( TFTaEncoderModel ):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MTaConfig
| 272 | 1 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00 # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc , acceptor_conc , intrinsic_conc , ) -> float:
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
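# Worked example (illustrative numbers, not from the original file):
#   builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
# gives roughly 0.02585 V * ln(1e14), i.e. about 0.833 V at T = 300 K.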
if __name__ == "__main__":
import doctest
doctest.testmod()
| 223 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_00_00
SMALL_TEST = 50_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def read( dataset , length ):
    for i in range(length ):
        _ = dataset[i]


@get_duration
def read_batch( dataset , length , batch_size ):
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted( dataset , length , type ):
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]


@get_duration
def read_formatted_batch( dataset , length , batch_size , type ):
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
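
# Each helper above is wrapped by @get_duration (from the local utils module); the
# benchmark below relies on that wrapper returning the wall-clock time of the call
# rather than the call's own result.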
def benchmark_iterating():
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}),
]
    functions_shuffled = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
        features = datasets.Features(
            {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={"""list""": (1_00,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + """ """ + """ """.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
print("""shuffling dataset""" )
        dataset = dataset.shuffle()
        print("""Second set of iterations (after shuffling)""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(lowerCamelCase ) )
UpperCamelCase_: List[Any] = func(
lowerCamelCase , **lowerCamelCase )
with open(lowerCamelCase , """wb""" ) as f:
f.write(json.dumps(lowerCamelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 223 | 1 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_lowerCamelCase : Tuple = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class SentencesToListOfCharacters( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase__ : str = " " ):
"""simple docstring"""
UpperCamelCase = sentence_delimiter
        def process_string( self , s ):
            """simple docstring"""
            return list(s )

        def process_list( self , inp ):
            """simple docstring"""
            chars = []
            for sent_idx, sentence in enumerate(inp ):
                chars.extend(self.process_string(sentence ) )
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp ) - 1:
                    chars.append(self.sentence_delimiter )
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_lowerCamelCase : Tuple = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_lowerCamelCase : int = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_lowerCamelCase : str = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
    def _compute( self , predictions , references , concatenate_texts=False ):
        """simple docstring"""
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references ):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 28 | import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    """simple docstring"""
    CHARACTER = '''char'''
    BPE = '''bpe'''
    WORDPIECE = '''wp'''
A : Tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ['''image_processor''', '''char_tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    char_tokenizer_class = '''MgpstrTokenizer'''
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2" )
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" )
        super().__init__(image_processor , tokenizer )
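
    # Note: MGP-STR decodes with three granularities -- character, BPE (GPT-2) and
    # WordPiece (BERT) -- and batch_decode keeps, per sample, the candidate string
    # with the highest cumulative confidence.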
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , sequences ):
        char_preds , bpe_preds , wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs , char_scores = self._decode_helper(char_preds , "char" )
        bpe_strs , bpe_scores = self._decode_helper(bpe_preds , "bpe" )
        wp_strs , wp_scores = self._decode_helper(wp_preds , "wp" )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper( self , pred_logits , format ):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(F'''Format {format} is not supported.''' )
        dec_strs , conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _ , preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob , _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self , sequences ):
        decode_strs = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs

    def bpe_decode( self , sequences ):
        return self.bpe_tokenizer.batch_decode(sequences )

    def wp_decode( self , sequences ):
        decode_strs = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
| 118 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__lowerCAmelCase = "cuda" if torch.cuda.is_available() else "cpu"
def split_text( text , n=1_00 , character=" " ) -> List[str]:
    text = text.split(character )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ )]
def split_documents( documents ) -> dict:
    titles , texts = [], []
    for title, text in zip(documents['title'] , documents['text'] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '' )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed( documents , ctx_encoder , ctx_tokenizer ) -> dict:
    input_ids = ctx_tokenizer(
        documents['title'] , documents['text'] , truncation=True , padding='longest' , return_tensors='pt' )['input_ids']
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main( rag_example_args , processing_args , index_hnsw_args , ) -> None:
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('embeddings' , custom_index=index )
# And save the index
    index_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
    dataset.get_index('embeddings' ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
    question: Optional[str] = field(
        default=None , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
    dpr_ctx_encoder_model_name: str = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
    batch_size: int = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class IndexHnswArguments:
    d: int = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
    m: int = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 359 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
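
# Editor's note (sketch, not part of the metric code): `_compute` transposes
# `references` because sacrebleu expects one list per reference *position*,
# not one list per prediction. A minimal illustration with two predictions,
# two references each:
#
#   references = [["r1a", "r1b"], ["r2a", "r2b"]]                      # per prediction
#   transformed_references = [[refs[i] for refs in references] for i in range(2)]
#   # -> [["r1a", "r2a"], ["r1b", "r2b"]]                              # per position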
| 288 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
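
# Hypothetical invocation sketch (script name and paths are illustrative, not
# taken from the original). The input must be a tab-separated file with
# "title" and "text" columns; the flags come from the dataclass field names:
#
#   printf 'Aaron\tAaron is a prophet...\n' > my_knowledge.csv
#   python use_own_knowledge_dataset.py \
#       --csv_path my_knowledge.csv \
#       --output_dir my_knowledge_dataset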
| 84 |
"""Root-mean-square (RMS) speed of gas molecules: v_rms = sqrt(3RT/M)."""

UNIVERSAL_GAS_CONSTANT = 8.3144598  # J/(mol*K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: nitrogen gas (N2) at 300 K; the molar mass must be given in
    # kg/mol, i.e. 0.028 for N2, which yields roughly 517 m/s
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 250 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = """\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks
    and Collin Burns
    and Saurav Kadavath
    and Akul Arora
    and Steven Basart
    and Eric Tang
    and Dawn Song
    and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
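
# Editor's note: `math_equivalence.is_equiv` canonicalizes both strings before
# comparing, so notational variants count as equal. A minimal sketch (assumes
# the `math_equivalence` module from the hendrycks/math repository):
#
#   import math_equivalence
#   math_equivalence.is_equiv("1/2", "\\frac{1}{2}")   # True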
| 351 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
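
# Editor's sketch of what the `_LazyModule` pattern buys (illustrative, not
# part of the original file): importing the package only registers names from
# `_import_structure`; the heavy submodule is imported on first attribute
# access, e.g.
#
#   import transformers.models.trocr as trocr   # cheap: no model code loaded yet
#   trocr.TrOCRForCausalLM                      # now `modeling_trocr` is imported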
| 47 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
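
# Hypothetical command line (script name and paths are illustrative):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin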
| 302 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 302 | 1 |
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph in which each possible edge appears with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is below `probability`
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add the reverse edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
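
# Usage sketch (not in the original): seed the module-level RNG for a
# reproducible graph, e.g.
#
#   random.seed(1)
#   g = random_graph(4, 0.5)         # undirected Erdos-Renyi-style graph
#   h = random_graph(4, 0.5, True)   # directed variant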
| 259 |
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 259 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql_iter = iter_sql_file(sqlite_path)
    expected_sql_iter = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql_iter, expected_sql_iter):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql_iter = iter_sql_file(sqlite_path)
    expected_sql_iter = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql_iter, expected_sql_iter):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
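
# Editor's sketch of the `sqlite_path` fixture these tests assume
# (illustrative): a SQLite file whose `dataset` table has four rows and the
# columns col_1 (string), col_2 (int), col_3 (float), e.g.
#
#   import sqlite3
#   con = sqlite3.connect("fixture.sqlite")
#   con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
#   con.executemany(
#       "INSERT INTO dataset VALUES (?, ?, ?)",
#       [("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0)],
#   )
#   con.commit()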
| 276 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 313 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Infinite incremental sieve of Eratosthenes, yielding the primes in order."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which the remainder 2 * p_n * n exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
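
# Editor's note on the `2 * prime * n` check: for the n-th prime p and odd n,
# expanding (p - 1)**n + (p + 1)**n with the binomial theorem modulo p**2
# cancels everything except the linear terms, leaving a remainder of 2 * n * p.
# The solution therefore only scans odd n until that remainder exceeds the
# limit, skipping even n (where the remainder is just 2).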
| 313 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
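
# Hypothetical usage sketch (checkpoint name, file name, and scores are
# illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": 0.99, "label": "cat"}, {"score": 0.01, "label": "dog"}]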
| 307 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 307 | 1 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_FOLDER = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FOLDER)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FOLDER)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FOLDER)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")

| 340 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the model's weights to the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )

| 340 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class CPULauncherTest(unittest.TestCase):
    def test_cpu_script(self):
        debug_launcher(test_script.main)

    def test_cpu_ops(self):
        debug_launcher(test_ops.main)
| 109 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : Union[str, Any] = ['''image_processor''']
A__ : Any = '''SamImageProcessor'''
def __init__( self : Tuple , _snake_case : Tuple ):
super().__init__(_snake_case )
__lowercase : str = self.image_processor
__lowercase : Any = -10
__lowercase : Dict = self.image_processor.size['''longest_edge''']
def __call__( self : Dict , _snake_case : str=None , _snake_case : Any=None , _snake_case : List[str]=None , _snake_case : Any=None , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : List[Any] , ):
__lowercase : List[str] = self.image_processor(
_snake_case , return_tensors=_snake_case , **_snake_case , )
# pop arguments that are not used in the foward but used nevertheless
__lowercase : Optional[int] = encoding_image_processor['''original_sizes''']
if hasattr(_snake_case , '''numpy''' ): # Checks if Torch or TF tensor
__lowercase : Optional[int] = original_sizes.numpy()
__lowercase , __lowercase , __lowercase : str = self._check_and_preprocess_points(
input_points=_snake_case , input_labels=_snake_case , input_boxes=_snake_case , )
__lowercase : int = self._normalize_and_convert(
_snake_case , _snake_case , input_points=_snake_case , input_labels=_snake_case , input_boxes=_snake_case , return_tensors=_snake_case , )
return encoding_image_processor
def snake_case_ ( self : List[str] , _snake_case : int , _snake_case : Optional[int] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=None , _snake_case : str="pt" , ):
if input_points is not None:
if len(_snake_case ) != len(_snake_case ):
__lowercase : Optional[Any] = [
self._normalize_coordinates(self.target_size , _snake_case , original_sizes[0] ) for point in input_points
]
else:
__lowercase : List[Any] = [
self._normalize_coordinates(self.target_size , _snake_case , _snake_case )
for point, original_size in zip(_snake_case , _snake_case )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowercase , __lowercase : Tuple = self._pad_points_and_labels(_snake_case , _snake_case )
__lowercase : Dict = np.array(_snake_case )
if input_labels is not None:
__lowercase : Dict = np.array(_snake_case )
if input_boxes is not None:
if len(_snake_case ) != len(_snake_case ):
__lowercase : Union[str, Any] = [
self._normalize_coordinates(self.target_size , _snake_case , original_sizes[0] , is_bounding_box=_snake_case )
for box in input_boxes
]
else:
__lowercase : Tuple = [
self._normalize_coordinates(self.target_size , _snake_case , _snake_case , is_bounding_box=_snake_case )
for box, original_size in zip(_snake_case , _snake_case )
]
__lowercase : Dict = np.array(_snake_case )
if input_boxes is not None:
if return_tensors == "pt":
__lowercase : int = torch.from_numpy(_snake_case )
# boxes batch size of 1 by default
__lowercase : List[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowercase : Dict = tf.convert_to_tensor(_snake_case )
# boxes batch size of 1 by default
__lowercase : int = tf.expand_dims(_snake_case , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowercase : Tuple = torch.from_numpy(_snake_case )
# point batch size of 1 by default
__lowercase : Tuple = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowercase : List[Any] = tf.convert_to_tensor(_snake_case )
# point batch size of 1 by default
__lowercase : Optional[int] = tf.expand_dims(_snake_case , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowercase : int = torch.from_numpy(_snake_case )
# point batch size of 1 by default
__lowercase : Any = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowercase : Any = tf.convert_to_tensor(_snake_case )
# point batch size of 1 by default
__lowercase : Union[str, Any] = tf.expand_dims(_snake_case , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def snake_case_ ( self : int , _snake_case : Any , _snake_case : str ):
__lowercase : Union[str, Any] = max([point.shape[0] for point in input_points] )
__lowercase : List[Any] = []
for i, point in enumerate(_snake_case ):
if point.shape[0] != expected_nb_points:
__lowercase : Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__lowercase : Tuple = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_snake_case )
__lowercase : List[Any] = processed_input_points
return input_points, input_labels
def snake_case_ ( self : Dict , _snake_case : int , _snake_case : np.ndarray , _snake_case : Any , _snake_case : Any=False ):
__lowercase , __lowercase : Tuple = original_size
__lowercase , __lowercase : Optional[Any] = self.image_processor._get_preprocess_shape(_snake_case , longest_edge=_snake_case )
__lowercase : Optional[int] = deepcopy(_snake_case ).astype(_snake_case )
if is_bounding_box:
__lowercase : str = coords.reshape(-1 , 2 , 2 )
__lowercase : Dict = coords[..., 0] * (new_w / old_w)
__lowercase : int = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowercase : Optional[Any] = coords.reshape(-1 , 4 )
return coords
def snake_case_ ( self : List[str] , _snake_case : List[Any]=None , _snake_case : Any=None , _snake_case : int=None , ):
if input_points is not None:
if hasattr(_snake_case , '''numpy''' ): # Checks for TF or Torch tensor
__lowercase : Tuple = input_points.numpy().tolist()
if not isinstance(_snake_case , _snake_case ) or not isinstance(input_points[0] , _snake_case ):
raise ValueError('''Input points must be a list of list of floating points.''' )
__lowercase : str = [np.array(_snake_case ) for input_point in input_points]
else:
__lowercase : str = None
if input_labels is not None:
if hasattr(_snake_case , '''numpy''' ):
__lowercase : Any = input_labels.numpy().tolist()
if not isinstance(_snake_case , _snake_case ) or not isinstance(input_labels[0] , _snake_case ):
raise ValueError('''Input labels must be a list of list integers.''' )
__lowercase : List[Any] = [np.array(_snake_case ) for label in input_labels]
else:
__lowercase : Tuple = None
if input_boxes is not None:
if hasattr(_snake_case , '''numpy''' ):
__lowercase : str = input_boxes.numpy().tolist()
if (
not isinstance(_snake_case , _snake_case )
or not isinstance(input_boxes[0] , _snake_case )
or not isinstance(input_boxes[0][0] , _snake_case )
):
raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
__lowercase : List[Any] = [np.array(_snake_case ).astype(np.floataa ) for box in input_boxes]
else:
__lowercase : Dict = None
return input_points, input_labels, input_boxes
@property
def snake_case_ ( self : List[Any] ):
__lowercase : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(_snake_case ) )
def snake_case_ ( self : str , *_snake_case : Union[str, Any] , **_snake_case : Dict ):
return self.image_processor.post_process_masks(*_snake_case , **_snake_case )
| 156 | 0 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase = 1000 ) -> int:
_lowerCAmelCase =2**power
_lowerCAmelCase =0
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 341 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''image_processor''', '''tokenizer''']
lowerCamelCase = '''CLIPImageProcessor'''
lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCAmelCase , )
_lowerCAmelCase =kwargs.pop("""feature_extractor""" )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 341 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> List[str]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self, lowerCAmelCase__) -> str:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> Tuple:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 69 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@register_to_config
def __init__( self , _snake_case = 768 , ):
"""simple docstring"""
super().__init__()
_lowerCAmelCase = nn.Parameter(torch.zeros(1 , _snake_case ) )
_lowerCAmelCase = nn.Parameter(torch.ones(1 , _snake_case ) )
def snake_case ( self , _snake_case = None , _snake_case = None , ):
"""simple docstring"""
_lowerCAmelCase = nn.Parameter(self.mean.to(_snake_case ).to(_snake_case ) )
_lowerCAmelCase = nn.Parameter(self.std.to(_snake_case ).to(_snake_case ) )
return self
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = (embeds - self.mean) * 1.0 / self.std
return embeds
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = (embeds * self.std) + self.mean
return embeds
| 82 | 0 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def __lowerCAmelCase ( lowercase : list[int] , lowercase : list[int] , lowercase : int ) -> list[int]:
"""simple docstring"""
snake_case : Optional[Any] = [0] * no_of_processes
snake_case : List[Any] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(lowercase ):
snake_case : Optional[int] = burst_time[i]
snake_case : Optional[int] = 0
snake_case : List[str] = 0
snake_case : int = 9_9999_9999
snake_case : Tuple = 0
snake_case : Union[str, Any] = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(lowercase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
snake_case : Any = remaining_time[j]
snake_case : str = j
snake_case : Any = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
snake_case : Any = remaining_time[short]
if minm == 0:
snake_case : List[Any] = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
snake_case : str = False
# Find finish time of current process
snake_case : str = increment_time + 1
# Calculate waiting time
snake_case : Optional[Any] = finish_time - arrival_time[short]
snake_case : Any = finar - burst_time[short]
if waiting_time[short] < 0:
snake_case : Tuple = 0
# Increment time
increment_time += 1
return waiting_time
def __lowerCAmelCase ( lowercase : list[int] , lowercase : int , lowercase : list[int] ) -> list[int]:
"""simple docstring"""
snake_case : Optional[int] = [0] * no_of_processes
for i in range(lowercase ):
snake_case : int = burst_time[i] + waiting_time[i]
return turn_around_time
def __lowerCAmelCase ( lowercase : list[int] , lowercase : list[int] , lowercase : int ) -> None:
"""simple docstring"""
snake_case : Any = 0
snake_case : Union[str, Any] = 0
for i in range(lowercase ):
snake_case : List[str] = total_waiting_time + waiting_time[i]
snake_case : str = total_turn_around_time + turn_around_time[i]
print(F'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
__snake_case = int(input())
__snake_case = [0] * no_of_processes
__snake_case = [0] * no_of_processes
__snake_case = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
__snake_case , __snake_case = map(int, input().split())
__snake_case = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case = burst_time
__snake_case = no_of_processes
__snake_case = waiting_time
__snake_case = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__snake_case = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 112 |
"""simple docstring"""
import math
import sys
def __lowerCAmelCase ( lowercase : int ) -> int:
"""simple docstring"""
if number != int(lowercase ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input must not be a negative number" )
if number == 0:
return 1
snake_case : Optional[Any] = [-1] * (number + 1)
snake_case : str = 0
for i in range(1 , number + 1 ):
snake_case : List[Any] = sys.maxsize
snake_case : Union[str, Any] = int(math.sqrt(lowercase ) )
for j in range(1 , root + 1 ):
snake_case : List[str] = 1 + answers[i - (j**2)]
snake_case : Optional[Any] = min(lowercase , lowercase )
snake_case : Any = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Optional[int] = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , snake_case__=None , snake_case__=None , *snake_case__ , **snake_case__ ):
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
if config is None:
assert isinstance(self.model , snake_case__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f""" {self.model.__class__}"""
)
lowerCAmelCase : Optional[int] = self.model.config
else:
lowerCAmelCase : List[str] = config
lowerCAmelCase : Any = data_args
lowerCAmelCase : Tuple = self.config.tgt_vocab_size if isinstance(self.config , snake_case__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
lowerCAmelCase : int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowerCAmelCase : Tuple = label_smoothed_nll_loss
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.optimizer is None:
lowerCAmelCase : Optional[int] = ["bias", "LayerNorm.weight"]
lowerCAmelCase : str = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
lowerCAmelCase : Union[str, Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowerCAmelCase : Dict = Adafactor
lowerCAmelCase : Optional[int] = {"scale_parameter": False, "relative_step": False}
else:
lowerCAmelCase : int = AdamW
lowerCAmelCase : int = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
lowerCAmelCase : Any = self.args.learning_rate
if self.sharded_ddp:
lowerCAmelCase : int = OSS(
params=snake_case__ , optim=snake_case__ , **snake_case__ , )
else:
lowerCAmelCase : Any = optimizer_cls(snake_case__ , **snake_case__ )
if self.lr_scheduler is None:
lowerCAmelCase : Tuple = self._get_lr_scheduler(snake_case__ )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
lowerCAmelCase : Tuple = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
lowerCAmelCase : Any = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
lowerCAmelCase : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=snake_case__ )
return scheduler
def lowercase__ ( self ):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowerCAmelCase : Dict = model(**snake_case__ , use_cache=snake_case__ )[0]
lowerCAmelCase : List[Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
lowerCAmelCase , lowerCAmelCase : str = model(**snake_case__ , labels=snake_case__ , use_cache=snake_case__ )[:2]
else:
# compute label smoothed loss
lowerCAmelCase : int = model(**snake_case__ , use_cache=snake_case__ )[0]
lowerCAmelCase : List[Any] = torch.nn.functional.log_softmax(snake_case__ , dim=-1 )
lowerCAmelCase , lowerCAmelCase : str = self.loss_fn(snake_case__ , snake_case__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = inputs.pop("labels" )
lowerCAmelCase , lowerCAmelCase : str = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
return loss
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ):
"""simple docstring"""
lowerCAmelCase : List[str] = self._prepare_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowerCAmelCase : Dict = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **snake_case__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowerCAmelCase : Dict = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs["max_length"] )
lowerCAmelCase : Optional[Any] = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
lowerCAmelCase , lowerCAmelCase : Dict = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : List[str] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowerCAmelCase : int = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowerCAmelCase : Optional[int] = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs["max_length"] )
return (loss, logits, labels)
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f""" padded to `max_length`={max_length}""" )
lowerCAmelCase : Optional[Any] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
lowerCAmelCase : int = tensor
return padded_tensor
| 108 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__A =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCamelCase ( ):
UpperCAmelCase__ : Union[str, Any] = Github(os.environ["""GITHUB_TOKEN"""] )
UpperCAmelCase__ : Any = g.get_repo("""huggingface/diffusers""" )
UpperCAmelCase__ : Optional[int] = repo.get_issues(state="""open""" )
for issue in open_issues:
UpperCAmelCase__ : Any = sorted(issue.get_comments() , key=lambda UpperCamelCase__ : i.created_at , reverse=UpperCamelCase__ )
UpperCAmelCase__ : List[Any] = comments[0] if len(UpperCamelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main() | 365 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _snake_case :
lowerCAmelCase :str = PegasusConfig
lowerCAmelCase :Optional[int] = {}
lowerCAmelCase :Dict = '''gelu'''
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=40 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=0 , ):
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : Any = batch_size
UpperCAmelCase__ : Union[str, Any] = seq_length
UpperCAmelCase__ : str = is_training
UpperCAmelCase__ : Dict = use_labels
UpperCAmelCase__ : Optional[int] = vocab_size
UpperCAmelCase__ : int = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : str = num_attention_heads
UpperCAmelCase__ : Any = intermediate_size
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = eos_token_id
UpperCAmelCase__ : Any = pad_token_id
UpperCAmelCase__ : Dict = bos_token_id
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
UpperCAmelCase__ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
UpperCAmelCase__ : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1)
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase__ : Union[str, Any] = prepare_pegasus_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return config, inputs_dict
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Optional[int] = TFPegasusModel(config=_lowerCamelCase).get_decoder()
UpperCAmelCase__ : Optional[int] = inputs_dict["""input_ids"""]
UpperCAmelCase__ : Any = input_ids[:1, :]
UpperCAmelCase__ : List[str] = inputs_dict["""attention_mask"""][:1, :]
UpperCAmelCase__ : int = inputs_dict["""head_mask"""]
UpperCAmelCase__ : int = 1
# first forward pass
UpperCAmelCase__ : int = model(_lowerCamelCase , attention_mask=_lowerCamelCase , head_mask=_lowerCamelCase , use_cache=_lowerCamelCase)
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size)
UpperCAmelCase__ : int = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and
UpperCAmelCase__ : Tuple = tf.concat([input_ids, next_tokens] , axis=-1)
UpperCAmelCase__ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1)
UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase)[0]
UpperCAmelCase__ : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
UpperCAmelCase__ : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1]))
UpperCAmelCase__ : int = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase__ : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCamelCase , _lowerCamelCase , rtol=1e-3)
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ):
if attention_mask is None:
UpperCAmelCase__ : Any = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase__ : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _snake_case ( a__ , a__ , unittest.TestCase ):
lowerCAmelCase :str = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowerCAmelCase :Any = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase :List[str] = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase :int = True
lowerCAmelCase :Optional[int] = False
lowerCAmelCase :Dict = False
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = TFPegasusModelTester(self)
UpperCAmelCase__ : Dict = ConfigTester(self , config_class=_lowerCamelCase)
def snake_case__ ( self):
self.config_tester.run_common_tests()
def snake_case__ ( self):
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase)
@require_sentencepiece
@require_tokenizers
@require_tf
class _snake_case ( unittest.TestCase ):
lowerCAmelCase :Dict = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
lowerCAmelCase :Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowerCAmelCase :Tuple = '''google/pegasus-xsum'''
@cached_property
def snake_case__ ( self):
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
def snake_case__ ( self , **_lowerCamelCase):
UpperCAmelCase__ : Dict = self.translate_src_text(**_lowerCamelCase)
assert self.expected_text == generated_words
def snake_case__ ( self , **_lowerCamelCase):
UpperCAmelCase__ : List[Any] = self.tokenizer(self.src_text , **_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""tf""")
UpperCAmelCase__ : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_lowerCamelCase , )
UpperCAmelCase__ : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_lowerCamelCase)
return generated_words
@slow
def snake_case__ ( self):
self._assert_generated_batch_equal_expected() | 283 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __lowerCamelCase :
'''simple docstring'''
a_ : int
a_ : TreeNode | None = None
a_ : TreeNode | None = None
lowercase__ = namedtuple("""CoinsDistribResult""", """moves excess""")
def __lowerCamelCase ( __UpperCamelCase ) -> int:
"""simple docstring"""
if root is None:
return 0
# Validation
def count_nodes(__UpperCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__UpperCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__UpperCamelCase ) != count_coins(__UpperCamelCase ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(__UpperCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = get_distrib(node.left )
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = get_distrib(node.right )
lowerCAmelCase_ : int = 1 - left_distrib_excess
lowerCAmelCase_ : Optional[Any] = 1 - right_distrib_excess
lowerCAmelCase_ : Any = (
left_distrib_moves
+ right_distrib_moves
+ abs(__UpperCamelCase )
+ abs(__UpperCamelCase )
)
lowerCAmelCase_ : Union[str, Any] = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(__UpperCamelCase , __UpperCamelCase )
return get_distrib(__UpperCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 241 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__A = logging.get_logger(__name__)
__A = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__magic_name__ :Dict = '''trajectory_transformer'''
__magic_name__ :Union[str, Any] = ['''past_key_values''']
__magic_name__ :Tuple = {
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , __UpperCAmelCase=1_0_0 , __UpperCAmelCase=5 , __UpperCAmelCase=1 , __UpperCAmelCase=1 , __UpperCAmelCase=2_4_9 , __UpperCAmelCase=6 , __UpperCAmelCase=1_7 , __UpperCAmelCase=2_5 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=1_2_8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.00_06 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=5_0_2_5_6 , __UpperCAmelCase=5_0_2_5_6 , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = vocab_size
lowerCAmelCase__ :Any = action_weight
lowerCAmelCase__ :Optional[Any] = reward_weight
lowerCAmelCase__ :List[Any] = value_weight
lowerCAmelCase__ :int = max_position_embeddings
lowerCAmelCase__ :Optional[Any] = block_size
lowerCAmelCase__ :int = action_dim
lowerCAmelCase__ :List[str] = observation_dim
lowerCAmelCase__ :List[str] = transition_dim
lowerCAmelCase__ :Optional[Any] = learning_rate
lowerCAmelCase__ :int = n_layer
lowerCAmelCase__ :Tuple = n_head
lowerCAmelCase__ :List[str] = n_embd
lowerCAmelCase__ :Dict = embd_pdrop
lowerCAmelCase__ :Dict = attn_pdrop
lowerCAmelCase__ :List[Any] = resid_pdrop
lowerCAmelCase__ :Optional[Any] = initializer_range
lowerCAmelCase__ :Tuple = layer_norm_eps
lowerCAmelCase__ :Dict = kaiming_initializer_range
lowerCAmelCase__ :Optional[Any] = use_cache
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
| 359 |
"""simple docstring"""
from pathlib import Path
import fire
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ :List[str] = Path(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Any = Path(_SCREAMING_SNAKE_CASE )
dest_dir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
for path in src_dir.iterdir():
lowerCAmelCase__ :Union[str, Any] = [x.rstrip() for x in list(path.open().readlines() )][:n]
lowerCAmelCase__ :Tuple = dest_dir.joinpath(path.name )
print(_SCREAMING_SNAKE_CASE )
dest_path.open('w' ).write('\n'.join(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
fire.Fire(minify)
| 254 | 0 |
from PIL import Image
def SCREAMING_SNAKE_CASE_ ( __A : Image ) -> Image:
"""simple docstring"""
a_ , a_ : int = image.size
a_ : Any = 0
a_ : List[str] = image.load()
for i in range(__A ):
for j in range(__A ):
a_ : int = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(__A ):
for i in range(__A ):
a_ : Union[str, Any] = 2_55 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
| 32 | import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def A ( _lowercase , _lowercase , _lowercase ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , _lowercase )
SCREAMING_SNAKE_CASE : Tuple = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Any = is_small_dataset(_lowercase )
assert result == expected
| 182 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = CycleDiffusionPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
_a = PipelineTesterMixin.required_optional_params - {'latents'}
_a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
_a = IMAGE_TO_IMAGE_IMAGE_PARAMS
_a = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self : int )-> Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] =UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
lowerCamelCase__ : Any =DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='''scaled_linear''', num_train_timesteps=1000, clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] =AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
lowerCamelCase__ : str =CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
lowerCamelCase__ : Dict =CLIPTextModel(lowerCamelCase )
lowerCamelCase__ : Tuple =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase__ : List[str] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : List[Any]=0 )-> List[str]:
lowerCamelCase__ : Any =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowerCamelCase__ : Tuple =image / 2 + 0.5
if str(lowerCamelCase ).startswith('''mps''' ):
lowerCamelCase__ : Tuple =torch.manual_seed(lowerCamelCase )
else:
lowerCamelCase__ : List[str] =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowerCamelCase__ : List[Any] ={
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def snake_case ( self : Union[str, Any] )-> List[Any]:
lowerCamelCase__ : Any ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Any =self.get_dummy_components()
lowerCamelCase__ : Tuple =CycleDiffusionPipeline(**lowerCamelCase )
lowerCamelCase__ : Tuple =pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : Optional[Any] =self.get_dummy_inputs(lowerCamelCase )
lowerCamelCase__ : Any =pipe(**lowerCamelCase )
lowerCamelCase__ : str =output.images
lowerCamelCase__ : List[Any] =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCamelCase__ : Optional[int] =np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' )
def snake_case ( self : int )-> Optional[Any]:
lowerCamelCase__ : Any =self.get_dummy_components()
for name, module in components.items():
if hasattr(lowerCamelCase, '''half''' ):
lowerCamelCase__ : Any =module.half()
lowerCamelCase__ : Any =CycleDiffusionPipeline(**lowerCamelCase )
lowerCamelCase__ : Any =pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
lowerCamelCase__ : Dict =pipe(**lowerCamelCase )
lowerCamelCase__ : List[Any] =output.images
lowerCamelCase__ : List[Any] =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCamelCase__ : Optional[int] =np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def snake_case ( self : List[str] )-> Any:
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def snake_case ( self : int )-> int:
return super().test_inference_batch_single_identical()
@skip_mps
def snake_case ( self : Dict )-> List[str]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def snake_case ( self : List[Any] )-> Any:
return super().test_save_load_optional_components()
@skip_mps
def snake_case ( self : Tuple )-> Any:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : Union[str, Any] )-> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
        init_image = init_image.resize((512, 512) )
        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='''scheduler''' )
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision='''fp16''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type='''np''', )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1
    def test_cycle_diffusion_pipeline( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
        init_image = init_image.resize((512, 512) )
        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='''scheduler''' )
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type='''np''', )
        image = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
| 370 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask :
    '''simple docstring'''
    def __init__( self, task_performed, total )-> None:
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until( self, mask, task_no ):
        # if mask == self.final_mask, all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
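        # each set bit of `mask` marks a person who has already been assigned a task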
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1 )
# save the value.
        self.dp[mask][task_no] = total_ways_util
return self.dp[mask][task_no]
    def count_no_of_ways( self, task_performed )-> int:
# Store the list of persons for each task
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0, 1 )
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 272 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
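# Minimal usage sketch (hypothetical values): DPRConfig(projection_dim=128) builds a
# config whose DPR encoders project their pooled output down to 128 dimensions.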
class DPRConfig( PretrainedConfig ):
    model_type = '''dpr'''
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 228 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
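# TokenizerTesterMixin supplies the shared tokenizer test-suite; the class attributes
# and helper methods below (tokenizer_class, get_tokenizer, ...) are the hooks it expects.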
@require_tokenizers
class CLIPTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
# fmt: off
a = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """lower newer"""
        # "</w>" marks the end of a word in CLIP's BPE vocabulary
        bpe_tokens = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
    def test_check_encoding_slow_fast( self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                text = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
                tokens_s = tokenizer_s.tokenize(text )
                tokens_r = tokenizer_r.tokenize(text )
                self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a = """xa\u0303y""" + """ """ + """x\xe3y"""
a = tokenizer_s.tokenize(__magic_name__ )
a = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
                for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s , tokens_r )
    def test_offsets_mapping_with_different_add_prefix_space_argument( self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
    def test_log_warning( self ):
        '''simple docstring'''
        with self.assertRaises(ValueError ) as context:
            self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
        self.assertTrue(
            context.exception.args[0].startswith(
                """The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
    def test_tokenization_python_rust_equals( self ):
        '''simple docstring'''
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case( self ):
        '''simple docstring'''
        pass
| 228 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
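# ANY(type) is a matcher used inside nested_simplify'd structures: it compares equal
# to any value of the given type, so scores and box coordinates need not be pinned.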
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor):
        object_detector = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection')
        examples = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
    def run_pipeline_test(self , object_detector , examples):
        outputs = object_detector(examples[0] , threshold=0.0)
        n = len(outputs)
        self.assertGreater(n , 0)
        self.assertEqual(
            outputs , [
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }
                for i in range(n)
            ] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF')
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection')
        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
[
{'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
[
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF')
    def test_large_model_tf(self):
pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=threshold , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=top_k , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
| 366 | """simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory( args: Namespace ):
    '''
    Factory function used to instantiate the convert command from the provided command line arguments.
    '''
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_a : str= "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        train_parser = parser.add_parser(
            'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , )
        train_parser.add_argument('--model_type' , type=str , required=True , help='Model\'s type.')
        train_parser.add_argument(
            '--tf_checkpoint' , type=str , required=True , help='TensorFlow checkpoint path or folder.')
        train_parser.add_argument(
            '--pytorch_dump_output' , type=str , required=True , help='Path to the PyTorch saved model output.')
        train_parser.add_argument('--config' , type=str , default='' , help='Configuration file path or folder.')
        train_parser.add_argument(
            '--finetuning_task_name' , type=str , default=None , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self , model_type: str , tf_checkpoint: str , pytorch_dump_output: str , config: str , finetuning_task_name: str , *args , ):
        self._logger = logging.get_logger('transformers-cli/converting')
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A)
if "ckpt" in self._tf_checkpoint.lower():
__snake_case : Union[str, Any] = self._tf_checkpoint
__snake_case : List[Any] = ''
else:
__snake_case : Optional[Any] = self._tf_checkpoint
__snake_case : List[Any] = ''
convert_transfo_xl_checkpoint_to_pytorch(
_A , self._config , self._pytorch_dump_output , _A)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]')
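# Hypothetical invocation of the command registered above (all paths are placeholders):
#   transformers-cli convert --model_type bert --tf_checkpoint ./bert_model.ckpt \
#       --pytorch_dump_output ./pytorch_model --config ./bert_config.json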
| 95 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
'''simple docstring'''
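    # `scores` holds the leaves of an implicit complete binary tree: node_index * 2
    # and node_index * 2 + 1 address a node's children, and the maximizing and
    # minimizing player alternate at each level of the tree.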
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ) , 2 )
    print(f"""Optimal value : {minimax(0 , 0 , True , scores , height )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 77 | """simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
    '''simple docstring'''
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/' )
    target_model_path = args.target_model_path
    print(f"""Load fine-pruned model from {model_name_or_path}""" )
    model = torch.load(os.path.join(model_name_or_path , 'pytorch_model.bin' ) )
    pruned_model = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowercase__ : Tuple = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
lowercase__ : List[str] = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
lowercase__ : Optional[Any] = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
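                # L0 / hard-concrete gate: stretch sigmoid(scores) from (0, 1) to (l, r),
                # then clamp back to [0, 1] so low-scoring weights receive an exactly-zero mask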
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
else:
raise ValueError('Unknown pruning method' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f"""bertarized_{os.path.basename(model_name_or_path )}""" )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f"""\nCreated folder {target_model_path}""" )
    torch.save(pruned_model , os.path.join(target_model_path , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
| 77 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
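# The fast tokenizer below mirrors the BertTokenizerFast pattern; note that every
# ConvBERT checkpoint listed above lower-cases its input by default (do_lower_case=True).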
class ConvBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        # sequence pairs are encoded as [CLS] A [SEP] B [SEP]; token_type_ids are 0
        # for the first segment and 1 for the second
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 29 |
import os
from math import log10
def lowerCamelCase__ ( A__ : str = "base_exp.txt" ):
'''simple docstring'''
__lowerCamelCase = 0
__lowerCamelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ):
__lowerCamelCase, __lowerCamelCase = list(map(A__ , line.split(""",""" ) ) )
if x * logaa(A__ ) > largest:
__lowerCamelCase = x * logaa(A__ )
__lowerCamelCase = i + 1
return result
if __name__ == "__main__":
print(solution())
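# Why the logarithm works (a sketch, assuming each line holds a "base,exponent" pair
# as in Project Euler 99): a**x > b**y  <=>  x * log10(a) > y * log10(b), so the
# comparison never has to evaluate the astronomically large powers themselves.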
| 29 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests( unittest.TestCase ):
@require_torch
    def test_small_model_pt( self ):
        audio_classifier = pipeline(
            task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
        dataset = load_dataset("""ashraq/esc50""" )
        audio = dataset["""train"""]["""audio"""][-1]["""array"""]
        output = audio_classifier(audio , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
        self.assertEqual(
            nested_simplify(output ) , [{"""score""": 0.5_01, """label""": """Sound of a dog"""}, {"""score""": 0.4_99, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
    def test_small_model_tf( self ):
pass
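    # The assertions below pin scores produced by the public laion/clap-htsat-unfused
    # checkpoint on an ESC-50 clip of a barking dog.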
@slow
@require_torch
    def test_large_model_pt( self ):
        audio_classifier = pipeline(
            task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
        # This is an audio of a dog
        dataset = load_dataset("""ashraq/esc50""" )
        audio = dataset["""train"""]["""audio"""][-1]["""array"""]
        output = audio_classifier(audio , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
            nested_simplify(output ) , [
{"""score""": 0.9_99, """label""": """Sound of a dog"""},
{"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""},
] , )
        output = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
            nested_simplify(output ) , [
[
{"""score""": 0.9_99, """label""": """Sound of a dog"""},
{"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
            nested_simplify(output ) , [
[
{"""score""": 0.9_99, """label""": """Sound of a dog"""},
{"""score""": 0.0_01, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
    def test_large_model_tf( self ):
pass
| 239 | '''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
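# Self-balancing AVL tree: every node caches its height, and insert/delete restore
# the balance factor of each visited node to {-1, 0, 1} using the rotations below.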
class MyQueue :
    def __init__( self ):
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0
    def is_empty( self ):
        return self.head == self.tail
    def push( self , data ):
        self.data.append(data )
        self.tail = self.tail + 1
    def pop( self ):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count( self ):
        return self.tail - self.head
    def print_queue( self ):
        print(self.data )
        print("""**************""" )
        print(self.data[self.head : self.tail] )
class MyNode :
    def __init__( self , data ):
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1
    def get_data( self ):
        return self.data
    def get_left( self ):
        return self.left
    def get_right( self ):
        return self.right
    def get_height( self ):
        return self.height
    def set_data( self , data ):
        self.data = data
    def set_left( self , node: MyNode | None ):
        self.left = node
    def set_right( self , node: MyNode | None ):
        self.right = node
    def set_height( self , height: int ):
        self.height = height
def get_height( node: MyNode | None ) -> int:
    if node is None:
        return 0
    return node.get_height()
def my_max( a: int , b: int ) -> int:
    if a > b:
        return a
    return b
def left_rotation( node: MyNode ) -> MyNode:
    print("""left rotation node:""" , node.get_data() )
    # promote the left child to be the new subtree root and hand its right
    # subtree back to the old root
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def right_rotation( node: MyNode ) -> MyNode:
    print("""right rotation node:""" , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def lr_rotation( node: MyNode ) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )
def rl_rotation( node: MyNode ) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
def insert_node( node: MyNode | None , data: Any ) -> MyNode | None:
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right() , data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    h = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h )
    return node
def get_right_most( root: MyNode ) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most( root: MyNode ) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node( root: MyNode , data: Any ) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child , temp_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("""No such data""" )
            return root
        else:
            root.set_left(del_node(left_child , data ) )
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data ) )
    # rebalance: a height difference of +/-2 means the AVL invariant is violated
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    h = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
    root.set_height(h )
    return root
class AVLtree :
    def __init__( self ):
        self.root: MyNode | None = None
    def get_height( self ):
        return get_height(self.root )
    def insert( self , data ):
        print("""insert:""" + str(data ) )
        self.root = insert_node(self.root , data )
    def del_node( self , data ):
        print("""delete:""" + str(data ) )
        if self.root is None:
            print("""Tree is empty!""" )
            return
        self.root = del_node(self.root , data )
    def __str__( self , ):  # a level traversal, gives a more intuitive look on the tree
        output = """"""
        q = MyQueue()
        q.push(self.root )
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = """ """ * int(math.pow(2 , layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(None )
                q.push(None )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            cnt = cnt + 1
            for i in range(100 ):
                if cnt == math.pow(2 , layer ) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 239 | 1 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
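# enable_full_determinism() pins the RNG and backend algorithm choices so the seeded
# output slices asserted below are reproducible across runs.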
class PriorTransformerTests( ModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = PriorTransformer
    main_input_name = '''hidden_states'''
@property
    def dummy_input( self ):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input( self , seed=0 ):
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    @property
    def input_shape( self ):
        return (4, 8)
    @property
    def output_shape( self ):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub( self ):
        model , loading_info = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature( self ):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2] , expected_arg_names )
    def test_output_pretrained( self ):
        model = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
        model = model.to(torch_device )
        if hasattr(model , 'set_default_attn_processor' ):
            model.set_default_attn_processor()
        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class PriorTransformerIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    def get_dummy_seed_input( self , batch_size=1 , embedding_dim=7_6_8 , num_embeddings=7_7 , seed=0 ):
        torch.manual_seed(seed )
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[3_7, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
    def test_kandinsky_prior( self , seed , expected_slice ):
        model = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
        model.to(torch_device )
        input = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**input )[0]
        assert list(sample.shape ) == [1, 7_6_8]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 ) | 366 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
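# The substitutions are applied in order by rename_state_dict_key below: the left-hand
# substrings come from the TF Pegasus checkpoint, the right-hand ones from the
# Bart-style Hugging Face state dict.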
def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
def convert_pegasus(tf_weights: dict , cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if "dense" in k or "proj" in new_k:
            # TF stores dense kernels transposed relative to PyTorch Linear weights
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def snake_case (UpperCAmelCase__="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
UpperCamelCase_: Union[str, Any] = tf.train.list_variables(UpperCAmelCase__ )
UpperCamelCase_: Tuple = {}
UpperCamelCase_: Dict = ['Adafactor', 'global_step']
for name, shape in tqdm(UpperCAmelCase__ , desc='converting tf checkpoint to dict' ):
UpperCamelCase_: Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase_: Dict = tf.train.load_variable(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path , save_dir) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir) | 292 | 0 |
def __UpperCAmelCase ( __a : Tuple = 10**12 ) -> Optional[int]:
"""simple docstring"""
_a : Any = 1
_a : Tuple = 0
_a : Dict = 1
_a : Optional[int] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
| 235 | """simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class snake_case :
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( *lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Union[str, Any] ):
pass
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = hashlib.mda(image.tobytes() )
return m.hexdigest()[:1_0]
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = np.array(lowerCamelCase )
UpperCAmelCase__ = npimg.shape
return {"hash": hashimage(lowerCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
snake_case__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = MaskGenerationPipeline(model=lowerCamelCase__ ,image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def __lowerCAmelCase ( self : Optional[Any] ):
pass
@slow
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
UpperCAmelCase__ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] ,)
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = 'facebook/sam-vit-huge'
UpperCAmelCase__ = pipeline('mask-generation' ,model=lowerCamelCase__ )
UpperCAmelCase__ = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] ,)
| 98 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ = '''upernet'''
def __init__( self , lowercase=None , lowercase=512 , lowercase=0.02 , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=384 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**UpperCamelCase_ )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_lowerCamelCase : int = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowerCamelCase : Optional[Any] = backbone_config.get('model_type' )
_lowerCamelCase : int = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase : Optional[Any] = config_class.from_dict(UpperCamelCase_ )
_lowerCamelCase : List[Any] = backbone_config
_lowerCamelCase : Tuple = hidden_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : str = pool_scales
_lowerCamelCase : Dict = use_auxiliary_head
_lowerCamelCase : List[str] = auxiliary_loss_weight
_lowerCamelCase : Optional[int] = auxiliary_in_channels
_lowerCamelCase : int = auxiliary_channels
_lowerCamelCase : Union[str, Any] = auxiliary_num_convs
_lowerCamelCase : Union[str, Any] = auxiliary_concat_input
_lowerCamelCase : str = loss_ignore_index
def A_ ( self ):
_lowerCamelCase : List[str] = copy.deepcopy(self.__dict__ )
_lowerCamelCase : Optional[int] = self.backbone_config.to_dict()
_lowerCamelCase : Any = self.__class__.model_type
return output | 354 |
"""simple docstring"""
def _snake_case ( lowercase__ ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
_lowerCamelCase : List[str] = len(lowercase__ )
_lowerCamelCase : List[str] = max(lowercase__ )
_lowerCamelCase : List[str] = min(lowercase__ )
# create the counting array
_lowerCamelCase : List[Any] = coll_max + 1 - coll_min
_lowerCamelCase : List[Any] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , lowercase__ ):
_lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
_lowerCamelCase : Dict = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , lowercase__ ) ):
_lowerCamelCase : Any = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def _snake_case ( lowercase__ ):
return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted)) | 12 | 0 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE_ : Any = logging.get_logger(__name__)
def _snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ):
def run_func(UpperCAmelCase_ : Any ):
@wraps(SCREAMING_SNAKE_CASE_ )
def run_in_eager_mode(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
return func(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@wraps(SCREAMING_SNAKE_CASE_ )
@tf.function(experimental_compile=SCREAMING_SNAKE_CASE_ )
def run_in_graph_mode(*UpperCAmelCase_ : str , **UpperCAmelCase_ : str ):
return func(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def _snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] ):
A__ = random.Random()
A__ = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(SCREAMING_SNAKE_CASE_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = 4_2
UpperCAmelCase = 4_2
UpperCAmelCase = "TensorFlow"
@property
def UpperCamelCase ( self: Any ):
"""simple docstring"""
return tf.__version__
def UpperCamelCase ( self: List[Any] , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
A__ = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
A__ = self._prepare_inference_func(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return self._measure_speed(_inference )
def UpperCamelCase ( self: Any , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
A__ = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
A__ = self._prepare_train_func(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return self._measure_speed(_train )
def UpperCamelCase ( self: str , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase )
A__ = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
A__ = self._prepare_inference_func(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return self._measure_memory(_inference )
def UpperCamelCase ( self: List[str] , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase )
A__ = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
A__ = self._prepare_train_func(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return self._measure_memory(_train )
def UpperCamelCase ( self: Dict , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
A__ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
A__ = (
hasattr(UpperCamelCase , """architectures""" )
and isinstance(config.architectures , UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
A__ = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
A__ = __import__("""transformers""" , fromlist=[model_class] )
A__ = getattr(UpperCamelCase , UpperCamelCase )
A__ = model_cls(UpperCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
A__ = TF_MODEL_MAPPING[config.__class__](UpperCamelCase )
# encoder-decoder has vocab size saved differently
A__ = config.vocab_size if hasattr(UpperCamelCase , """vocab_size""" ) else config.encoder.vocab_size
A__ = random_input_ids(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(UpperCamelCase , decoder_input_ids=UpperCamelCase , training=UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(UpperCamelCase , training=UpperCamelCase )
A__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
A__ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
A__ = (
hasattr(UpperCamelCase , """architectures""" )
and isinstance(config.architectures , UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
A__ = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
A__ = __import__("""transformers""" , fromlist=[model_class] )
A__ = getattr(UpperCamelCase , UpperCamelCase )
A__ = model_cls(UpperCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
A__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCamelCase )
# encoder-decoder has vocab size saved differently
A__ = config.vocab_size if hasattr(UpperCamelCase , """vocab_size""" ) else config.encoder.vocab_size
A__ = random_input_ids(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
A__ = model(UpperCamelCase , decoder_input_ids=UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase )[0]
A__ = tf.gradients(UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
A__ = model(UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase )[0]
A__ = tf.gradients(UpperCamelCase , model.trainable_variables )
return gradients
A__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase ( self: Dict , UpperCamelCase: List[Any] ):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
A__ = timeit.repeat(
UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn\'t fit on GPU. {e}""" )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Callable[[], None] ):
"""simple docstring"""
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
A__ = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won\'t log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
A__ = 'N/A'
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
A__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
A__ = nvml.nvmlDeviceGetMemoryInfo(UpperCamelCase )
A__ = meminfo.used
A__ = Memory(UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
A__ = None
else:
A__ = measure_peak_memory_cpu(UpperCamelCase )
A__ = Memory(UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
A__ = stop_memory_tracing(UpperCamelCase )
if memory is None:
A__ = summary.total
else:
A__ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn\'t fit on GPU. {e}""" )
return "N/A", None
| 335 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A__ ( __magic_name__ ):
lowercase = 42
lowercase = 42
def __init__( self : Any , a : UNetaDModel , a : ScoreSdeVeScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=a , scheduler=a )
@torch.no_grad()
def __call__( self : List[str] , a : int = 1 , a : int = 2_000 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , **a : int , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.unet.config.sample_size
lowerCAmelCase__ : Union[str, Any] = (batch_size, 3, img_size, img_size)
lowerCAmelCase__ : Tuple = self.unet
lowerCAmelCase__ : Optional[Any] = randn_tensor(a , generator=a ) * self.scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(self.device )
self.scheduler.set_timesteps(a )
self.scheduler.set_sigmas(a )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCAmelCase__ : Dict = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowerCAmelCase__ : Optional[Any] = self.unet(a , a ).sample
lowerCAmelCase__ : Dict = self.scheduler.step_correct(a , a , generator=a ).prev_sample
# prediction step
lowerCAmelCase__ : Optional[int] = model(a , a ).sample
lowerCAmelCase__ : Optional[Any] = self.scheduler.step_pred(a , a , a , generator=a )
lowerCAmelCase__ , lowerCAmelCase__ : str = output.prev_sample, output.prev_sample_mean
lowerCAmelCase__ : Any = sample_mean.clamp(0 , 1 )
lowerCAmelCase__ : List[str] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase__ : int = self.numpy_to_pil(a )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=a ) | 212 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_a = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase:
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
@dataclass
class _UpperCAmelCase:
lowercase__ = 42
lowercase__ = 42
lowercase__ = None
lowercase__ = None
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'train'
lowercase__ = 'dev'
lowercase__ = 'test'
class _UpperCAmelCase:
@staticmethod
def UpperCAmelCase ( __a , __a) -> List[InputExample]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def UpperCAmelCase ( __a) -> List[str]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def UpperCAmelCase ( __a , __a , __a , __a , __a=False , __a="[CLS]" , __a=1 , __a="[SEP]" , __a=False , __a=False , __a=0 , __a=0 , __a=-1_00 , __a=0 , __a=True , ) -> List[InputFeatures]:
'''simple docstring'''
_UpperCamelCase = {label: i for i, label in enumerate(__a)}
_UpperCamelCase = []
for ex_index, example in enumerate(__a):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , __a , len(__a))
_UpperCamelCase = []
_UpperCamelCase = []
for word, label in zip(example.words , example.labels):
_UpperCamelCase = tokenizer.tokenize(__a)
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(__a) > 0:
tokens.extend(__a)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__a) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_UpperCamelCase = tokenizer.num_special_tokens_to_add()
if len(__a) > max_seq_length - special_tokens_count:
_UpperCamelCase = tokens[: (max_seq_length - special_tokens_count)]
_UpperCamelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_UpperCamelCase = [sequence_a_segment_id] * len(__a)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_UpperCamelCase = [cls_token] + tokens
_UpperCamelCase = [pad_token_label_id] + label_ids
_UpperCamelCase = [cls_token_segment_id] + segment_ids
_UpperCamelCase = tokenizer.convert_tokens_to_ids(__a)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_UpperCamelCase = [1 if mask_padding_with_zero else 0] * len(__a)
# Zero-pad up to the sequence length.
_UpperCamelCase = max_seq_length - len(__a)
if pad_on_left:
_UpperCamelCase = ([pad_token] * padding_length) + input_ids
_UpperCamelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_UpperCamelCase = ([pad_token_segment_id] * padding_length) + segment_ids
_UpperCamelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(__a) == max_seq_length
assert len(__a) == max_seq_length
assert len(__a) == max_seq_length
assert len(__a) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''')
logger.info('''guid: %s''' , example.guid)
logger.info('''tokens: %s''' , ''' '''.join([str(__a) for x in tokens]))
logger.info('''input_ids: %s''' , ''' '''.join([str(__a) for x in input_ids]))
logger.info('''input_mask: %s''' , ''' '''.join([str(__a) for x in input_mask]))
logger.info('''segment_ids: %s''' , ''' '''.join([str(__a) for x in segment_ids]))
logger.info('''label_ids: %s''' , ''' '''.join([str(__a) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
_UpperCamelCase = None
features.append(
InputFeatures(
input_ids=__a , attention_mask=__a , token_type_ids=__a , label_ids=__a))
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 42
lowercase__ = nn.CrossEntropyLoss().ignore_index
def __init__( self , __a , __a , __a , __a , __a , __a = None , __a=False , __a = Split.train , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = os.path.join(
__a , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__a)) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCamelCase = cached_features_file + '''.lock'''
with FileLock(__a):
if os.path.exists(__a) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''')
_UpperCamelCase = torch.load(__a)
else:
logger.info(F'''Creating features from dataset file at {data_dir}''')
_UpperCamelCase = token_classification_task.read_examples_from_file(__a , __a)
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCamelCase = token_classification_task.convert_examples_to_features(
__a , __a , __a , __a , cls_token_at_end=bool(model_type in ['''xlnet''']) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__a , pad_on_left=bool(tokenizer.padding_side == '''left''') , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F'''Saving features into cached file {cached_features_file}''')
torch.save(self.features , __a)
def __len__( self) -> Union[str, Any]:
'''simple docstring'''
return len(self.features)
def __getitem__( self , __a) -> InputFeatures:
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase:
lowercase__ = 42
lowercase__ = -1_00
def __init__( self , __a , __a , __a , __a , __a , __a = None , __a=False , __a = Split.train , ) -> str:
'''simple docstring'''
_UpperCamelCase = token_classification_task.read_examples_from_file(__a , __a)
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCamelCase = token_classification_task.convert_examples_to_features(
__a , __a , __a , __a , cls_token_at_end=bool(model_type in ['''xlnet''']) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__a , pad_on_left=bool(tokenizer.padding_side == '''left''') , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_UpperCamelCase = tf.data.Dataset.from_generator(
__a , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None]), '''attention_mask''': tf.TensorShape([None])},
tf.TensorShape([None]),
) , )
else:
_UpperCamelCase = tf.data.Dataset.from_generator(
__a , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None]),
'''attention_mask''': tf.TensorShape([None]),
'''token_type_ids''': tf.TensorShape([None]),
},
tf.TensorShape([None]),
) , )
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
return self.dataset
def __len__( self) -> Any:
'''simple docstring'''
return len(self.features)
def __getitem__( self , __a) -> InputFeatures:
'''simple docstring'''
return self.features[i]
| 355 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=16 , __a=36 , __a=6 , __a=6 , __a=6 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = embedding_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_hidden_groups
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = AlbertModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = AlbertForPreTraining(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , sentence_order_label=__a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = AlbertForMaskedLM(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = AlbertForQuestionAnswering(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = AlbertForSequenceClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = AlbertForTokenClassification(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = self.num_choices
_UpperCamelCase = AlbertForMultipleChoice(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
def UpperCAmelCase ( self , __a , __a , __a=False) -> Tuple:
'''simple docstring'''
_UpperCamelCase = super()._prepare_for_class(__a , __a , return_labels=__a)
if return_labels:
if model_class in get_values(__a):
_UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a)
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a)
return inputs_dict
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = AlbertModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCamelCase = type
self.model_tester.create_and_check_model(*__a)
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = AlbertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = AlbertModel.from_pretrained('''albert-base-v2''')
_UpperCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
_UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_UpperCamelCase = model(__a , attention_mask=__a)[0]
_UpperCamelCase = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , __a)
_UpperCamelCase = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4))
| 100 | 0 |
class _lowercase :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = size
lowerCamelCase__ : List[str] = [0] * size
lowerCamelCase__ : str = [0] * size
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : int ):
'''simple docstring'''
return index | (index + 1)
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : int ):
'''simple docstring'''
return (index & (index + 1)) - 1
def lowerCAmelCase ( self : int , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = value
while index < self.size:
lowerCamelCase__ : Tuple = self.get_prev(__lowerCamelCase ) + 1
if current_left_border == index:
lowerCamelCase__ : Optional[Any] = value
else:
lowerCamelCase__ : str = max(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Dict = self.get_next(__lowerCamelCase )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
right -= 1 # Because of right is exclusive
lowerCamelCase__ : str = 0
while left <= right:
lowerCamelCase__ : Optional[Any] = self.get_prev(__lowerCamelCase )
if left <= current_left:
lowerCamelCase__ : Optional[Any] = max(__lowerCamelCase , self.tree[right] )
lowerCamelCase__ : Any = current_left
else:
lowerCamelCase__ : Optional[Any] = max(__lowerCamelCase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184 |
from __future__ import annotations
from typing import Any
def lowercase_ ( _A : list[Any] ):
"""simple docstring"""
create_state_space_tree(_A , [] , 0 )
def lowercase_ ( _A : list[Any] , _A : list[Any] , _A : int ):
"""simple docstring"""
if index == len(_A ):
print(_A )
return
create_state_space_tree(_A , _A , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_A , _A , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
A : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 184 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
lowercase_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
lowercase_ : Optional[int] = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
lowercase_ : List[str] = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
lowercase_ : List[Any] = tempfile.mkdtemp()
lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
# load decoder from hub
lowercase_ : List[Any] = '''hf-internal-testing/ngram-beam-search-decoder'''
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = self.add_kwargs_tokens_map.copy()
kwargs.update(__SCREAMING_SNAKE_CASE )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : List[str] = self.get_feature_extractor()
lowercase_ : Dict = self.get_decoder()
lowercase_ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
lowercase_ : List[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __SCREAMING_SNAKE_CASE )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowercase_ : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.get_feature_extractor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : Tuple = self.get_decoder()
lowercase_ : Dict = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = floats_list((3, 10_00) )
lowercase_ : Tuple = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
lowercase_ : List[str] = processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = self.get_feature_extractor()
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : List[Any] = self.get_decoder()
lowercase_ : int = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = '''This is a test string'''
lowercase_ : int = processor(text=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE=(2, 10, 16) , __SCREAMING_SNAKE_CASE=77 ):
"""simple docstring"""
np.random.seed(__SCREAMING_SNAKE_CASE )
return np.random.rand(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = self.get_feature_extractor()
lowercase_ : Union[str, Any] = self.get_tokenizer()
lowercase_ : List[Any] = self.get_decoder()
lowercase_ : Tuple = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowercase_ : List[Any] = processor.decode(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = decoder.decode_beams(__SCREAMING_SNAKE_CASE )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[int] = self.get_feature_extractor()
lowercase_ : Dict = self.get_tokenizer()
lowercase_ : List[Any] = self.get_decoder()
lowercase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
lowercase_ : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowercase_ : int = processor.batch_decode(__SCREAMING_SNAKE_CASE )
else:
with get_context(__SCREAMING_SNAKE_CASE ).Pool() as pool:
lowercase_ : int = processor.batch_decode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = list(__SCREAMING_SNAKE_CASE )
with get_context('''fork''' ).Pool() as p:
lowercase_ : List[Any] = decoder.decode_beams_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ , lowercase_ : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__SCREAMING_SNAKE_CASE , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__SCREAMING_SNAKE_CASE , decoded_processor.logit_score )
self.assertListEqual(__SCREAMING_SNAKE_CASE , decoded_processor.lm_score )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = self.get_feature_extractor()
lowercase_ : int = self.get_tokenizer()
lowercase_ : Union[str, Any] = self.get_decoder()
lowercase_ : int = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = self._get_dummy_logits()
lowercase_ : Any = 15
lowercase_ : Any = -20.0
lowercase_ : Tuple = -4.0
lowercase_ : str = processor.batch_decode(
__SCREAMING_SNAKE_CASE , beam_width=__SCREAMING_SNAKE_CASE , beam_prune_logp=__SCREAMING_SNAKE_CASE , token_min_logp=__SCREAMING_SNAKE_CASE , )
lowercase_ : Dict = decoded_processor_out.text
lowercase_ : List[Any] = list(__SCREAMING_SNAKE_CASE )
with get_context('''fork''' ).Pool() as pool:
lowercase_ : int = decoder.decode_beams_batch(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , beam_width=__SCREAMING_SNAKE_CASE , beam_prune_logp=__SCREAMING_SNAKE_CASE , token_min_logp=__SCREAMING_SNAKE_CASE , )
lowercase_ : int = [d[0][0] for d in decoded_decoder_out]
lowercase_ : str = [d[0][2] for d in decoded_decoder_out]
lowercase_ : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __SCREAMING_SNAKE_CASE )
self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = self.get_feature_extractor()
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : int = self.get_decoder()
lowercase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = self._get_dummy_logits()
lowercase_ : Tuple = 2.0
lowercase_ : Optional[int] = 5.0
lowercase_ : int = -20.0
lowercase_ : str = True
lowercase_ : List[str] = processor.batch_decode(
__SCREAMING_SNAKE_CASE , alpha=__SCREAMING_SNAKE_CASE , beta=__SCREAMING_SNAKE_CASE , unk_score_offset=__SCREAMING_SNAKE_CASE , lm_score_boundary=__SCREAMING_SNAKE_CASE , )
lowercase_ : str = decoded_processor_out.text
lowercase_ : Dict = list(__SCREAMING_SNAKE_CASE )
decoder.reset_params(
alpha=__SCREAMING_SNAKE_CASE , beta=__SCREAMING_SNAKE_CASE , unk_score_offset=__SCREAMING_SNAKE_CASE , lm_score_boundary=__SCREAMING_SNAKE_CASE , )
with get_context('''fork''' ).Pool() as pool:
lowercase_ : int = decoder.decode_beams_batch(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
lowercase_ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowercase_ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
lowercase_ : Dict = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowercase_ : str = os.listdir(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowercase_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
lowercase_ : int = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowercase_ : str = os.listdir(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = os.listdir(__SCREAMING_SNAKE_CASE )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowercase_ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowercase_ : List[Any] = floats_list((3, 10_00) )
lowercase_ : Union[str, Any] = processor_wavaveca(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
lowercase_ : List[Any] = processor_auto(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
lowercase_ : Any = self._get_dummy_logits()
lowercase_ : Tuple = processor_wavaveca.batch_decode(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = processor_auto.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.get_feature_extractor()
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : int = self.get_decoder()
lowercase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def _snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = [d[key] for d in offsets]
return retrieved_list
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowercase_ : List[str] = self._get_dummy_logits()[0]
lowercase_ : Tuple = processor.decode(__SCREAMING_SNAKE_CASE , output_word_offsets=__SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowercase_ : List[Any] = self._get_dummy_logits()
lowercase_ : Optional[Any] = processor.batch_decode(__SCREAMING_SNAKE_CASE , output_word_offsets=__SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _snake_case ( self ):
"""simple docstring"""
import torch
lowercase_ : List[Any] = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
lowercase_ : Optional[Any] = iter(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = next(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
lowercase_ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowercase_ : List[str] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ).logits.cpu().numpy()
lowercase_ : Tuple = processor.decode(logits[0] , output_word_offsets=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowercase_ : Union[str, Any] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowercase_ : Union[str, Any] = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''word''' ) ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(''' '''.join(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''word''' ) ) , output.text )
# output times
lowercase_ : Optional[Any] = torch.tensor(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''start_time''' ) )
lowercase_ : Union[str, Any] = torch.tensor(self.get_from_offsets(__SCREAMING_SNAKE_CASE , '''end_time''' ) )
# fmt: off
lowercase_ : Any = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
lowercase_ : List[str] = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=0.01 ) )
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=0.01 ) )
| 264 |
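The timestamp arithmetic exercised in the last test above generalizes to any CTC model; a minimal sketch, assuming only that the model config exposes `inputs_to_logits_ratio` and the feature extractor a `sampling_rate`:

# Hedged sketch: turn CTC word offsets (logit-frame indices) into seconds,
# mirroring the time_offset computation in the integration test above.
def offsets_to_seconds(word_offsets, inputs_to_logits_ratio, sampling_rate):
    time_offset = inputs_to_logits_ratio / sampling_rate  # seconds per logit frame
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
        }
        for d in word_offsets
    ]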
"""Molar concentration and ideal-gas law (PV = nRT, R = 0.0821 L*atm/(mol*K)) conversions."""


def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor = (moles / volume) * nfactor, with volume in litres."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Pressure in atm from PV = nRT, given volume (L), moles and temperature (K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Volume in litres from PV = nRT, given pressure (atm), moles and temperature (K)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Temperature in kelvin from PV = nRT, given pressure (atm), moles and volume (L)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 264 | 1 |
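A quick round-trip sanity check for the ideal-gas helpers above (a sketch using the restored function names; the values follow from R = 0.0821, up to rounding):

# P = nRT / V for n=1 mol, T=300 K, V=1 L gives 0.0821 * 300 = 24.63 -> 25 atm,
# and feeding that pressure back recovers roughly the original temperature.
pressure = moles_to_pressure(volume=1, moles=1, temperature=300)  # -> 25
temperature = pressure_and_volume_to_temperature(pressure=25, moles=1, volume=1)  # -> 305, i.e. ~300 K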
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : str = logging.get_logger(__name__)
__a : Any = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class _UpperCamelCase ( _lowerCamelCase ):
"""simple docstring"""
__a : Any = 'blip_text_model'
def __init__( self , lowerCAmelCase__=3_05_24 , lowerCAmelCase__=7_68 , lowerCAmelCase__=7_68 , lowerCAmelCase__=30_72 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=8 , lowerCAmelCase__=5_12 , lowerCAmelCase__="gelu" , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3_05_22 , lowerCAmelCase__=2 , lowerCAmelCase__=0 , lowerCAmelCase__=1_02 , lowerCAmelCase__=True , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , sep_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = encoder_hidden_size
__lowercase = intermediate_size
__lowercase = projection_dim
__lowercase = hidden_dropout_prob
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = max_position_embeddings
__lowercase = layer_norm_eps
__lowercase = hidden_act
__lowercase = initializer_range
__lowercase = attention_probs_dropout_prob
__lowercase = is_decoder
__lowercase = use_cache
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
__lowercase = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
__lowercase = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _UpperCamelCase ( _lowerCamelCase ):
"""simple docstring"""
__a : List[Any] = 'blip_vision_model'
def __init__( self , lowerCAmelCase__=7_68 , lowerCAmelCase__=30_72 , lowerCAmelCase__=5_12 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=3_84 , lowerCAmelCase__=16 , lowerCAmelCase__="gelu" , lowerCAmelCase__=1E-5 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1E-10 , **lowerCAmelCase__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = projection_dim
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
__lowercase = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
__lowercase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _UpperCamelCase ( _lowerCamelCase ):
"""simple docstring"""
__a : List[Any] = 'blip'
__a : int = True
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=5_12 , lowerCAmelCase__=2.6592 , lowerCAmelCase__=2_56 , **lowerCAmelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
if text_config is None:
__lowercase = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
__lowercase = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
__lowercase = BlipTextConfig(**_SCREAMING_SNAKE_CASE )
__lowercase = BlipVisionConfig(**_SCREAMING_SNAKE_CASE )
__lowercase = self.vision_config.hidden_size
__lowercase = projection_dim
__lowercase = logit_scale_init_value
__lowercase = 1.0
__lowercase = 0.02
__lowercase = image_text_hidden_size
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
        return output
| 210 |
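The composite-config pattern above (sub-configs plus a `to_dict` override that serializes them in place) can be exercised as follows; a sketch assuming the upstream class name `BlipConfig`, which the mangled identifiers above stand in for:

# Build a BLIP-style composite config from plain dicts and round-trip it.
cfg = BlipConfig(
    text_config={"vocab_size": 30524, "hidden_size": 768},
    vision_config={"hidden_size": 768, "patch_size": 16},
    projection_dim=512,
)
d = cfg.to_dict()
assert d["model_type"] == "blip"                # injected by the to_dict override
assert d["text_config"]["vocab_size"] == 30524  # sub-config serialized in place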
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
A_ : List[Any] = 0
A_ : Dict = 1
A_ : Union[str, Any] = 2
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        " model's max length" )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
        return records
| 86 | 0 |
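The `hole` strategy above trims the prompt from the left so that prompt plus new tokens fit the model window; the same arithmetic in isolation (a sketch over plain tensors):

import torch

# Keep only the most recent tokens so that len(kept) + max_new_tokens <= model_max_length.
def apply_hole(input_ids: torch.Tensor, max_new_tokens: int, model_max_length: int) -> torch.Tensor:
    cur_len = input_ids.shape[-1]
    if cur_len + max_new_tokens <= model_max_length:
        return input_ids  # already fits, nothing to trim
    keep_length = model_max_length - max_new_tokens
    if keep_length <= 0:
        raise ValueError("the number of desired new tokens exceeds the model's max length")
    return input_ids[:, -keep_length:]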
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def __A (_SCREAMING_SNAKE_CASE ) ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :str = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase__ :Dict = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase__ :List[str] = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase__ :Dict = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase__ :Dict = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(_SCREAMING_SNAKE_CASE )-1}" )
if "norm" in key:
lowerCAmelCase__ :List[Any] = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase__ :str = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase__ :Tuple = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(_SCREAMING_SNAKE_CASE )-1}" )
if "layer_norm1" in key:
lowerCAmelCase__ :Optional[Any] = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase__ :Union[str, Any] = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase__ :int = key[key.find('block' ) + len('block' )]
lowerCAmelCase__ :Union[str, Any] = key.replace(F"block{idx}" , F"block.{int(_SCREAMING_SNAKE_CASE )-1}" )
if "attn.q" in key:
lowerCAmelCase__ :Tuple = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase__ :List[Any] = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase__ :List[Any] = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase__ :Optional[Any] = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase__ :Union[str, Any] = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase__ :Any = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase__ :Any = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase__ :List[str] = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase__ :List[str] = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase__ :Any = key.replace(F"linear_c{idx}" , F"linear_c.{int(_SCREAMING_SNAKE_CASE )-1}" )
if "bot_conv" in key:
lowerCAmelCase__ :Optional[Any] = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase__ :List[Any] = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase__ :Any = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase__ :List[str] = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase__ :Any = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase__ :Union[str, Any] = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase__ :Optional[int] = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase__ :str = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase__ :Dict = value
return new_state_dict
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase__ :Union[str, Any] = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" )
lowerCAmelCase__ :Union[str, Any] = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase__ :int = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase__ :Optional[Any] = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase__ :Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase__ :Tuple = kv_bias[config.hidden_sizes[i] :]
def __A () ->Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ :str = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase__ :List[Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ) ->int:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase__ :Optional[int] = GLPNImageProcessor()
# prepare image
lowerCAmelCase__ :List[Any] = prepare_img()
lowerCAmelCase__ :str = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase__ :Any = torch.load(_SCREAMING_SNAKE_CASE , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase__ :Optional[int] = rename_keys(_SCREAMING_SNAKE_CASE )
# key and value matrices need special treatment
read_in_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowerCAmelCase__ :Tuple = GLPNForDepthEstimation(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
# forward pass
lowerCAmelCase__ :Dict = model(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase__ :Optional[Any] = torch.tensor(
[[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] )
elif "kitti" in model_name:
lowerCAmelCase__ :List[Any] = torch.tensor(
[[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] )
else:
raise ValueError(F"Unknown model name: {model_name}" )
lowerCAmelCase__ :List[str] = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_SCREAMING_SNAKE_CASE , )
image_processor.push_to_hub(
repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
__A = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 369 |
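The `read_in_k_v` step above splits each block's fused key/value projection into separate tensors; the same split in isolation, with a toy hidden size:

import torch

# A fused KV weight has shape (2 * hidden, hidden): the top half is the key
# projection, the bottom half the value projection.
hidden = 4
kv_weight, kv_bias = torch.randn(2 * hidden, hidden), torch.randn(2 * hidden)
k_weight, v_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
k_bias, v_bias = kv_bias[:hidden], kv_bias[hidden:]
assert k_weight.shape == v_weight.shape == (hidden, hidden)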
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A = logging.get_logger(__name__)
__A = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( a , a ):
"""simple docstring"""
__magic_name__ :int = """swin"""
__magic_name__ :Tuple = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , __UpperCAmelCase=2_2_4 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=9_6 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 1_2, 2_4] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=3_2 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ :Any = image_size
lowerCAmelCase__ :List[Any] = patch_size
lowerCAmelCase__ :Optional[int] = num_channels
lowerCAmelCase__ :str = embed_dim
lowerCAmelCase__ :Optional[int] = depths
lowerCAmelCase__ :List[str] = len(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = num_heads
lowerCAmelCase__ :List[Any] = window_size
lowerCAmelCase__ :List[Any] = mlp_ratio
lowerCAmelCase__ :int = qkv_bias
lowerCAmelCase__ :Optional[int] = hidden_dropout_prob
lowerCAmelCase__ :int = attention_probs_dropout_prob
lowerCAmelCase__ :List[Any] = drop_path_rate
lowerCAmelCase__ :Any = hidden_act
lowerCAmelCase__ :Dict = use_absolute_embeddings
lowerCAmelCase__ :int = layer_norm_eps
lowerCAmelCase__ :Dict = initializer_range
lowerCAmelCase__ :int = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase__ :str = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
lowerCAmelCase__ :str = ['stem'] + [F"stage{idx}" for idx in range(1 , len(__UpperCAmelCase ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :int = version.parse("""1.11""" )
@property
def snake_case ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case ( self ):
'''simple docstring'''
return 1E-4
| 254 | 0 |
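The channel arithmetic at the end of the Swin config above doubles the embedding dimension at each stage; a quick check with the defaults `embed_dim=96`, `depths=[2, 2, 6, 2]`:

# hidden_size after the last stage is embed_dim * 2 ** (num_stages - 1).
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768  # 96 -> 192 -> 384 -> 768 across the four stages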
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( __lowerCAmelCase , unittest.TestCase):
A: List[Any] = OpenAIGPTTokenizer
A: Optional[int] = OpenAIGPTTokenizerFast
A: str = True
A: Dict = False
def UpperCAmelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ : int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
UpperCamelCase__ : Optional[int] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
UpperCamelCase__ : Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
UpperCamelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase__ ) )
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Any ) -> List[Any]:
'''simple docstring'''
return "lower newer", "lower newer"
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Dict = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
UpperCamelCase__ : Any = '''lower'''
UpperCamelCase__ : List[Any] = ['''low''', '''er</w>''']
UpperCamelCase__ : Dict = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : List[str] = tokens + ['''<unk>''']
UpperCamelCase__ : int = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : List[Any]=15 ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
# Simple input
UpperCamelCase__ : Union[str, Any] = '''This is a simple input'''
UpperCamelCase__ : Dict = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCamelCase__ : List[Any] = ('''This is a simple input''', '''This is a pair''')
UpperCamelCase__ : Union[str, Any] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCamelCase__ , tokenizer_r.encode , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(lowerCamelCase__ , tokenizer_r.encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCamelCase__ , tokenizer_r.batch_encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCamelCase__ , tokenizer_r.encode , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(lowerCamelCase__ , tokenizer_r.encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCamelCase__ , tokenizer_r.batch_encode_plus , lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , )
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __magic_name__ ( __lowerCAmelCase):
pass
| 146 |
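The fixture above encodes BPE as a vocab plus ranked merges; the core merge loop can be sketched in a few lines (a toy illustration of the idea, not the tokenizer's actual implementation) to show how "lower" becomes ["low", "er</w>"]:

# Toy BPE: repeatedly merge the highest-ranked adjacent pair, using the same
# merge table as the test fixture above.
merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]
ranks = {pair: i for i, pair in enumerate(merges)}

def bpe(word):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # mark the word boundary
    while True:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        candidates = [p for p in pairs if p in ranks]
        if not candidates:
            return symbols
        a, b = min(candidates, key=ranks.__getitem__)  # lowest rank merges first
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (a, b):
                merged.append(a + b)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged

assert bpe("lower") == ["low", "er</w>"]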
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] by repeatedly halving the bracket."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """Example: x**3 - 2x - 5 has a single real root near x = 2.0945."""
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
| 146 | 1 |
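Bisection halves the bracket on every iteration, so the loop above needs about log2((b - a) / tol) steps; for the example call this is a fixed, tiny cost:

import math

# Iterations to shrink the bracket [1, 1000] below the 1e-7 stopping tolerance.
a, b, tol = 1.0, 1000.0, 10**-7
print(math.ceil(math.log2((b - a) / tol)))  # 34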
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE : Dict = 16
_SCREAMING_SNAKE_CASE : Optional[int] = 32
def UpperCamelCase_( snake_case : Accelerator , snake_case : int = 1_6 ):
'''simple docstring'''
snake_case_ = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case_ = load_dataset("glue" , "mrpc" )
def tokenize_function(snake_case : List[str] ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case , max_length=snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ = datasets.map(
snake_case , batched=snake_case , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(snake_case : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ = 1_6
elif accelerator.mixed_precision != "no":
snake_case_ = 8
else:
snake_case_ = None
return tokenizer.pad(
snake_case , padding="longest" , max_length=snake_case , pad_to_multiple_of=snake_case , return_tensors="pt" , )
# Instantiate dataloaders.
snake_case_ = DataLoader(
tokenized_datasets["train"] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
snake_case_ = DataLoader(
tokenized_datasets["validation"] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : List[str] = mocked_dataloaders # noqa: F811
def UpperCamelCase_( snake_case : str , snake_case : Optional[int] ):
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , snake_case ) == "1":
snake_case_ = 2
# New Code #
snake_case_ = int(args.gradient_accumulation_steps )
# Initialize accelerator
snake_case_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ = config["lr"]
snake_case_ = int(config["num_epochs"] )
snake_case_ = int(config["seed"] )
snake_case_ = int(config["batch_size"] )
snake_case_ = evaluate.load("glue" , "mrpc" )
set_seed(snake_case )
snake_case_ , snake_case_ = get_dataloaders(snake_case , snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ = AdamW(params=model.parameters() , lr=snake_case )
# Instantiate scheduler
snake_case_ = get_linear_schedule_with_warmup(
optimizer=snake_case , num_warmup_steps=1_0_0 , num_training_steps=(len(snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case )
# Now we train the model
for epoch in range(snake_case ):
model.train()
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # We also do not currently support TPUs, nor do we advise using this on them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(snake_case ):
snake_case_ = model(**snake_case )
snake_case_ = output.loss
accelerator.backward(snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ = model(**snake_case )
snake_case_ = outputs.logits.argmax(dim=-1 )
snake_case_ , snake_case_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=snake_case , references=snake_case , )
snake_case_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , snake_case )
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=snake_case , default=snake_case , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
snake_case_ = parser.parse_args()
snake_case_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(snake_case , snake_case )
if __name__ == "__main__":
main()
| 92 |
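For reference, `accelerator.accumulate(model)` in the training loop above is roughly equivalent to the classic manual pattern below (a sketch, assuming standard PyTorch training objects):

def train_with_manual_accumulation(model, optimizer, lr_scheduler, train_dataloader, accumulation_steps=2):
    # Scale each micro-batch loss so the accumulated gradient matches a full batch,
    # and only step/zero the optimizer every `accumulation_steps` micro-batches.
    model.train()
    for step, batch in enumerate(train_dataloader):
        loss = model(**batch).loss / accumulation_steps
        loss.backward()
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()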
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_SCREAMING_SNAKE_CASE : Tuple = get_logger(__name__)
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : int , snake_case : List[Any]=0 ):
'''simple docstring'''
os.makedirs(snake_case , exist_ok=snake_case )
with FSDP.state_dict_type(
snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
snake_case_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case_ = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
snake_case_ = os.path.join(snake_case , snake_case )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(snake_case , snake_case )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case_ = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
snake_case_ = os.path.join(snake_case , snake_case )
logger.info(f'Saving model to {output_model_file}' )
torch.save(snake_case , snake_case )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case_ = os.path.join(snake_case , f'{MODEL_NAME}_{model_index}' )
os.makedirs(snake_case , exist_ok=snake_case )
logger.info(f'Saving model to {ckpt_dir}' )
snake_case_ = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=snake_case , storage_writer=dist_cp.FileSystemWriter(snake_case ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : int , snake_case : Union[str, Any] , snake_case : Any=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(snake_case ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
snake_case_ = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
snake_case_ = os.path.join(snake_case , snake_case )
logger.info(f'Loading model from {input_model_file}' )
snake_case_ = torch.load(snake_case )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case_ = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
snake_case_ = os.path.join(snake_case , snake_case )
logger.info(f'Loading model from {input_model_file}' )
snake_case_ = torch.load(snake_case )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case_ = (
os.path.join(snake_case , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
snake_case_ = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=snake_case , storage_reader=dist_cp.FileSystemReader(snake_case ) , planner=DefaultLoadPlanner() , )
snake_case_ = state_dict["model"]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(snake_case )
def UpperCamelCase_( snake_case : str , snake_case : List[str] , snake_case : Any , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple=0 ):
'''simple docstring'''
os.makedirs(snake_case , exist_ok=snake_case )
with FSDP.state_dict_type(
snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
snake_case_ = FSDP.optim_state_dict(snake_case , snake_case )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
snake_case_ = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
snake_case_ = os.path.join(snake_case , snake_case )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(snake_case , snake_case )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
snake_case_ = os.path.join(snake_case , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(snake_case , exist_ok=snake_case )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : int , snake_case : Optional[int] , snake_case : Union[str, Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case_ = None
            # The check below should work, but it currently doesn't (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
snake_case_ = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
snake_case_ = os.path.join(snake_case , snake_case )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
snake_case_ = torch.load(snake_case )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
snake_case_ = (
os.path.join(snake_case , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
snake_case_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(snake_case ) , )
snake_case_ = optim_state["optimizer"]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
snake_case_ = FSDP.optim_state_dict_to_load(snake_case , snake_case , snake_case )
optimizer.load_state_dict(snake_case )
| 92 | 1 |
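All four helpers above dispatch on `StateDictType` to choose a checkpoint layout; the naming convention reduces to a small helper (a sketch following the same scheme):

import os

# FULL -> one file, LOCAL -> one file per rank, SHARDED -> a directory consumed
# by torch.distributed.checkpoint, each suffixed with the model index.
def checkpoint_path(output_dir, state_dict_type, process_index=0, model_index=0, name="pytorch_model"):
    if state_dict_type == "FULL_STATE_DICT":
        fname = f"{name}.bin" if model_index == 0 else f"{name}_{model_index}.bin"
    elif state_dict_type == "LOCAL_STATE_DICT":
        fname = (
            f"{name}_rank{process_index}.bin"
            if model_index == 0
            else f"{name}_{model_index}_rank{process_index}.bin"
        )
    else:  # SHARDED_STATE_DICT
        fname = f"{name}_{model_index}"
    return os.path.join(output_dir, fname)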
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the ScoreSdeVeScheduler's step functions.

    Args:
        prev_sample: computed sample (x_{t-1}) of the previous timestep.
        prev_sample_mean: mean averaged `prev_sample` over previous timesteps.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance exploding stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # this scheduler does not rescale model inputs
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
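# A minimal predictor-corrector sampling sketch for the scheduler above. The
# `model` is an assumed score network following the diffusers UNet calling
# convention (`model(x, sigma).sample`); the sample shape is illustrative.
def _example_sde_ve_sampling(model, shape=(1, 3, 64, 64), num_inference_steps=100, generator=None):
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)

    # start from pure noise scaled by the initial (maximum) sigma
    sample = torch.randn(*shape, generator=generator) * scheduler.init_noise_sigma

    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])

        # corrector steps (Langevin dynamics at the current noise level)
        for _ in range(scheduler.config.correct_steps):
            model_output = model(sample, sigma_t).sample
            sample = scheduler.step_correct(model_output, sample, generator=generator).prev_sample

        # predictor step (discretized reverse-time SDE)
        model_output = model(sample, sigma_t).sample
        output = scheduler.step_pred(model_output, t, sample, generator=generator)
        sample, sample_mean = output.prev_sample, output.prev_sample_mean

    return sample_mean  # the denoised mean is conventionally returned as the final image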
| 265 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
a : Union[str, Any] = torch.device("""cpu""")
def prepare_img():
    # an image of two cats, commonly used to sanity-check vision models
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
a : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
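# A minimal usage sketch, assuming this script is saved as
# convert_swiftformer_original_to_hf.py; the checkpoint path is a placeholder:
#
#     python convert_swiftformer_original_to_hf.py \
#         --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs/ \
#         --original_ckpt /path/to/swiftformer_xs_checkpoint.pth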
| 265 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)

    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)

    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)

    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
snake_case_ = parser.parse_args()
convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
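# A minimal usage sketch with placeholder paths (the model id is illustrative):
#
#     python convert_unispeech_sat_original_s3prl_checkpoint_to_pytorch.py \
#         --base_model_name microsoft/unispeech-sat-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_checkpoint.ckpt \
#         --model_dump_path ./converted_model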
| 216 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
snake_case_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
snake_case_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
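# A minimal usage sketch with placeholder paths; pass --not_finetuned when the
# fairseq checkpoint is a pretraining checkpoint rather than a CTC model:
#
#     python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./unispeech_sat.pt \
#         --pytorch_dump_folder_path ./converted_model \
#         --config_path ./config.json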
| 216 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    # convert bytes to megabytes
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer; fall back to the DeepSpeed dummy optimizer when the
    # DeepSpeed config already defines one
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
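# A minimal launch sketch, assuming an `accelerate config` that enables the
# DeepSpeed plugin; the file name and values are placeholders:
#
#     accelerate launch peak_memory_usage_tracker.py \
#         --model_name_or_path bert-base-cased \
#         --output_dir . \
#         --num_epochs 1 \
#         --peak_memory_upper_bound 3000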
| 311 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. "
B"\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
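# A minimal reading sketch for the messages built above; the file path is a
# placeholder:
#
#     m = ModelProto()
#     with open("tokenizer.model", "rb") as f:
#         m.ParseFromString(f.read())
#     print(m.trainer_spec.model_type, len(m.pieces))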
| 311 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
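# A minimal non-test usage sketch of the FaissIndex wrapper exercised above;
# the data is random and purely illustrative:
#
#     import faiss
#     index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#     index.add_vectors(np.random.rand(100, 5).astype(np.float32))
#     scores, ids = index.search(np.ones(5, dtype=np.float32), k=3)  # top-3 neighbors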
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 31 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)

    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)

    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)

    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 81 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
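# A minimal usage sketch of the public API re-exported by this lazy module;
# the encoder and decoder checkpoints are illustrative:
#
#     from transformers import EncoderDecoderModel
#
#     model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#         "bert-base-uncased", "bert-base-uncased"
#     )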
| 140 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """
    Output class for the DeepFloyd IF pipelines.

    Args:
        images: list of denoised PIL images or a numpy array of images.
        nsfw_detected: per-image NSFW flags, or None if safety checking was skipped.
        watermark_detected: per-image watermark flags, or None if safety checking was skipped.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
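# A minimal loading sketch for the first-stage pipeline exported above; the
# model id, variant, and dtype are illustrative:
#
#     import torch
#     from diffusers import IFPipeline
#
#     pipe = IFPipeline.from_pretrained(
#         "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
#     )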
| 7 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
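# For the dimensions [30, 35, 15, 5, 10, 20, 25] (six matrices), the DP above
# reports 15125 scalar multiplications and the parenthesization
# (( A1 ( A2 A3 )) (( A4 A5 ) A6 )). It runs in O(n^3) time and O(n^2) space.
# A tiny worked check:
#
#     matrix, sol = matrix_chain_order([10, 20, 30])  # two matrices: 10x20 @ 20x30
#     assert matrix[1][2] == 6000                     # 10 * 20 * 30 multiplications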
| 7 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )

        config.num_queries = self.num_queries
        config.num_labels = self.num_labels

        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class __snake_case ( _a , _a , unittest.TestCase ):
_a : Tuple= (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_a : Tuple= {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_a : Dict= False
_a : List[Any]= False
_a : Any= False
_a : List[Any]= False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = MaskaFormerModelTester(self )
lowercase : str = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Any = model_class(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Any = [*signature.parameters.keys()]
lowercase : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__ )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowercase : str = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = (self.model_tester.min_size,) * 2
lowercase : Dict = {
"pixel_values": torch.randn((2, 3, *size) ,device=SCREAMING_SNAKE_CASE__ ),
"mask_labels": torch.randn((2, 10, *size) ,device=SCREAMING_SNAKE_CASE__ ),
"class_labels": torch.zeros(2 ,10 ,device=SCREAMING_SNAKE_CASE__ ).long(),
}
lowercase : Union[str, Any] = self.model_tester.get_config()
lowercase : Dict = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = model(**SCREAMING_SNAKE_CASE__ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : List[Any] = model_class(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = model(**SCREAMING_SNAKE_CASE__ ,output_attentions=SCREAMING_SNAKE_CASE__ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase : Any = self.all_model_classes[1]
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
lowercase : int = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ ,mask_labels=SCREAMING_SNAKE_CASE__ ,class_labels=SCREAMING_SNAKE_CASE__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = self.all_model_classes[1]
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
lowercase : Union[str, Any] = True
lowercase : Optional[int] = True
lowercase : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
model.train()
lowercase : Optional[int] = model(SCREAMING_SNAKE_CASE__ ,mask_labels=SCREAMING_SNAKE_CASE__ ,class_labels=SCREAMING_SNAKE_CASE__ )
lowercase : str = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowercase : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase : Optional[Any] = 1e-4
def _snake_case( ) -> Dict:
lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = self.default_image_processor
lowercase : int = prepare_img()
lowercase : Dict = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE__ ,(1, 3, 384, 384) )
with torch.no_grad():
lowercase : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) )
lowercase : Dict = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) )
lowercase : Optional[Any] = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE__ ).eval()
lowercase : Optional[int] = self.default_image_processor
lowercase : Dict = prepare_img()
lowercase : str = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE__ ,(1, 3, 384, 384) )
with torch.no_grad():
lowercase : Tuple = model(**SCREAMING_SNAKE_CASE__ )
# masks_queries_logits
lowercase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowercase : Optional[Any] = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
lowercase : Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) )
# class_queries_logits
lowercase : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
lowercase : List[str] = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE__ ).eval()
lowercase : int = self.default_image_processor
lowercase : Tuple = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,)
lowercase : List[str] = inputs["pixel_values"].to(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = [el.to(SCREAMING_SNAKE_CASE__ ) for el in inputs["mask_labels"]]
lowercase : Any = [el.to(SCREAMING_SNAKE_CASE__ ) for el in inputs["class_labels"]]
with torch.no_grad():
lowercase : List[Any] = model(**SCREAMING_SNAKE_CASE__ )
self.assertTrue(outputs.loss is not None )
| 20 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class _snake_case ( _a ):
_A : Optional[int] = '''t5'''
_A : Union[str, Any] = ['''past_key_values''']
_A : Dict = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]=32_128 ,SCREAMING_SNAKE_CASE__ : List[str]=512 ,SCREAMING_SNAKE_CASE__ : Any=64 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_048 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=6 ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : Dict=8 ,SCREAMING_SNAKE_CASE__ : Optional[int]=32 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=128 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Tuple=1e-6 ,SCREAMING_SNAKE_CASE__ : str=1.0 ,SCREAMING_SNAKE_CASE__ : int="relu" ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : Dict=0 ,SCREAMING_SNAKE_CASE__ : Tuple=1 ,**SCREAMING_SNAKE_CASE__ : Tuple ,):
SCREAMING_SNAKE_CASE:int = vocab_size
SCREAMING_SNAKE_CASE:Any = d_model
SCREAMING_SNAKE_CASE:Union[str, Any] = d_kv
SCREAMING_SNAKE_CASE:Optional[int] = d_ff
SCREAMING_SNAKE_CASE:Tuple = num_layers
SCREAMING_SNAKE_CASE:str = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
SCREAMING_SNAKE_CASE:Union[str, Any] = num_heads
SCREAMING_SNAKE_CASE:int = relative_attention_num_buckets
SCREAMING_SNAKE_CASE:Tuple = relative_attention_max_distance
SCREAMING_SNAKE_CASE:Dict = dropout_rate
SCREAMING_SNAKE_CASE:List[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE:List[str] = initializer_factor
SCREAMING_SNAKE_CASE:Tuple = feed_forward_proj
SCREAMING_SNAKE_CASE:str = use_cache
SCREAMING_SNAKE_CASE:Optional[Any] = self.feed_forward_proj.split("-" )
SCREAMING_SNAKE_CASE:Any = act_info[-1]
SCREAMING_SNAKE_CASE:Tuple = act_info[0] == "gated"
if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
SCREAMING_SNAKE_CASE:int = "gelu_new"
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
class _snake_case ( _a ):
@property
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:int = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
SCREAMING_SNAKE_CASE:Optional[int] = "past_encoder_sequence + sequence"
SCREAMING_SNAKE_CASE:str = {0: "batch"}
SCREAMING_SNAKE_CASE:List[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE:Tuple = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE:List[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ ,direction="inputs" )
return common_inputs
@property
def __UpperCamelCase ( self : Optional[int] ):
return 13
| 139 | 0 |
def _a ( SCREAMING_SNAKE_CASE_ : int ):
assert isinstance(_snake_case , _snake_case ), F"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
__lowerCAmelCase = F"""The input value of [n={number}] has to be > 0"""
raise ValueError(_snake_case )
else:
__lowerCAmelCase = sylvester(number - 1 )
__lowerCAmelCase = num - 1
__lowerCAmelCase = num
return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 369 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class a__ :
@staticmethod
def __SCREAMING_SNAKE_CASE( *_A , **_A ):
"""simple docstring"""
pass
def _a ( SCREAMING_SNAKE_CASE_ : Image ):
__lowerCAmelCase = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a__ ( unittest.TestCase ):
_a : Tuple = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = DepthEstimationPipeline(model=_A , image_processor=_A )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , _A )
import datasets
__lowerCAmelCase = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
__lowerCAmelCase = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , _A , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@slow
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "Intel/dpt-large"
__lowerCAmelCase = pipeline("depth-estimation" , model=_A )
__lowerCAmelCase = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
__lowerCAmelCase = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.3_04 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_62 )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 102 | 0 |
from itertools import count
def __lowerCamelCase ( lowerCamelCase__ : int = 50 ):
'''simple docstring'''
lowerCamelCase = [1] * min_block_length
for n in count(lowerCamelCase__ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase__ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1000000:
break
return n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 252 |
import re
import string
import numpy as np
import datasets
UpperCAmelCase : List[str] = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
UpperCAmelCase : str = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
UpperCAmelCase : Dict = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def __A ( self , A , A , A=None , A=False , A=False , A=False , ) -> List[str]:
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
lowerCamelCase = np.array([re.sub(A , """""" , A ) for x in predictions] )
lowerCamelCase = np.array([re.sub(A , """""" , A ) for x in references] )
else:
lowerCamelCase = np.asarray(A )
lowerCamelCase = np.asarray(A )
if ignore_case:
lowerCamelCase = np.char.lower(A )
lowerCamelCase = np.char.lower(A )
if ignore_punctuation:
lowerCamelCase = string.punctuation.maketrans("""""" , """""" , string.punctuation )
lowerCamelCase = np.char.translate(A , table=A )
lowerCamelCase = np.char.translate(A , table=A )
if ignore_numbers:
lowerCamelCase = string.digits.maketrans("""""" , """""" , string.digits )
lowerCamelCase = np.char.translate(A , table=A )
lowerCamelCase = np.char.translate(A , table=A )
lowerCamelCase = predictions == references
return {"exact_match": np.mean(A ) * 1_00}
| 252 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def _A ( A__ ):
"""simple docstring"""
print('''Generating primitive root of p''' )
while True:
__lowercase = random.randrange(3 , A__ )
if pow(A__ , 2 , A__ ) == 1:
continue
if pow(A__ , A__ , A__ ) == 1:
continue
return g
def _A ( A__ ):
"""simple docstring"""
print('''Generating prime p...''' )
__lowercase = rabin_miller.generate_large_prime(A__ ) # select large prime number.
__lowercase = primitive_root(A__ ) # one primitive root on modulo p.
__lowercase = random.randrange(3 , A__ ) # private_key -> have to be greater than 2 for safety.
__lowercase = cryptomath.find_mod_inverse(pow(A__ , A__ , A__ ) , A__ )
__lowercase = (key_size, e_a, e_a, p)
__lowercase = (key_size, d)
return public_key, private_key
def _A ( A__ , A__ ):
"""simple docstring"""
if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
print('''\nWARNING:''' )
print(
F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
__lowercase , __lowercase = generate_key(A__ )
print(F"\nWriting public key to file {name}_pubkey.txt..." )
with open(F"{name}_pubkey.txt" , '''w''' ) as fo:
fo.write(F"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" )
print(F"Writing private key to file {name}_privkey.txt..." )
with open(F"{name}_privkey.txt" , '''w''' ) as fo:
fo.write(F"{private_key[0]},{private_key[1]}" )
def _A ( ):
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 52 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def _A ( A__ ):
"""simple docstring"""
for i in range(0 , A__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def _A ( A__ ):
"""simple docstring"""
for i in range(A__ , 0 , -1 ):
for _ in range(A__ , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def _A ( A__ ):
"""simple docstring"""
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(A__ ) # upper half
reverse_floyd(A__ ) # lower half
if __name__ == "__main__":
print(R'''| /\ | |- | |- |--| |\ /| |-''')
print(R'''|/ \| |- |_ |_ |__| | \/ | |_''')
lowerCAmelCase__ = 1
while K:
lowerCAmelCase__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
lowerCAmelCase__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 52 | 1 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : List[str] = "T5Config"
def lowerCamelCase ( UpperCAmelCase__ : jnp.array , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> jnp.ndarray:
lowercase_ : str = jnp.zeros_like(UpperCAmelCase__ )
lowercase_ : str = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowercase_ : List[Any] = shifted_input_ids.at[:, 0].set(UpperCAmelCase__ )
lowercase_ : List[Any] = jnp.where(shifted_input_ids == -100 , UpperCAmelCase__ , UpperCAmelCase__ )
return shifted_input_ids
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''mt5'''
UpperCamelCase__ = MTaConfig
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''mt5'''
UpperCamelCase__ = MTaConfig
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''mt5'''
UpperCamelCase__ = MTaConfig
| 239 | '''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Dict = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 239 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UpperCAmelCase_ : Optional[int] = [
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
UpperCAmelCase_ : Dict = """UperNetConfig"""
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : Union[int, Tuple[int, int]] , lowercase_ : Union[int, Tuple[int, int], str] = 0 , lowercase_ : bool = False , lowercase_ : Union[int, Tuple[int, int]] = 1 , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : List[str] = nn.Convad(
in_channels=lowercase_ , out_channels=lowercase_ , kernel_size=lowercase_ , padding=lowercase_ , bias=lowercase_ , dilation=lowercase_ , )
SCREAMING_SNAKE_CASE_ : int = nn.BatchNormad(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = nn.ReLU()
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : torch.Tensor):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.conv(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.batch_norm(lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = self.activation(lowercase_)
return output
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[int] = [
nn.AdaptiveAvgPoolad(lowercase_),
UperNetConvModule(lowercase_ , lowercase_ , kernel_size=1),
]
for i, layer in enumerate(self.layers):
self.add_module(str(lowercase_) , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : torch.Tensor):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = input
for layer in self.layers:
SCREAMING_SNAKE_CASE_ : List[Any] = layer(lowercase_)
return hidden_state
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Tuple[int, ...] , lowercase_ : int , lowercase_ : int , lowercase_ : bool):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pool_scales
SCREAMING_SNAKE_CASE_ : List[str] = align_corners
SCREAMING_SNAKE_CASE_ : Tuple = in_channels
SCREAMING_SNAKE_CASE_ : Tuple = channels
SCREAMING_SNAKE_CASE_ : List[str] = []
for i, pool_scale in enumerate(lowercase_):
SCREAMING_SNAKE_CASE_ : Dict = UperNetPyramidPoolingBlock(pool_scale=lowercase_ , in_channels=lowercase_ , channels=lowercase_)
self.blocks.append(lowercase_)
self.add_module(str(lowercase_) , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : torch.Tensor):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = []
for ppm in self.blocks:
SCREAMING_SNAKE_CASE_ : List[Any] = ppm(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.functional.interpolate(
lowercase_ , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners)
ppm_outs.append(lowercase_)
return ppm_outs
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str]):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : List[Any] = config
SCREAMING_SNAKE_CASE_ : str = config.pool_scales # e.g. (1, 2, 3, 6)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = in_channels
SCREAMING_SNAKE_CASE_ : Dict = config.hidden_size
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : int = nn.Convad(self.channels , config.num_labels , kernel_size=1)
# PSP Module
SCREAMING_SNAKE_CASE_ : Union[str, Any] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
SCREAMING_SNAKE_CASE_ : int = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
SCREAMING_SNAKE_CASE_ : Tuple = nn.ModuleList()
SCREAMING_SNAKE_CASE_ : str = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
SCREAMING_SNAKE_CASE_ : int = UperNetConvModule(lowercase_ , self.channels , kernel_size=1)
SCREAMING_SNAKE_CASE_ : str = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1)
self.lateral_convs.append(lowercase_)
self.fpn_convs.append(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = UperNetConvModule(
len(self.in_channels) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
self.apply(self._init_weights)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Any):
'''simple docstring'''
if isinstance(lowercase_ , nn.Convad):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = inputs[-1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [x]
psp_outs.extend(self.psp_modules(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat(lowercase_ , dim=1)
SCREAMING_SNAKE_CASE_ : Dict = self.bottleneck(lowercase_)
return output
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : torch.Tensor):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(lowercase_))
# build top-down path
SCREAMING_SNAKE_CASE_ : List[Any] = len(lowercase_)
for i in range(used_backbone_levels - 1 , 0 , -1):
SCREAMING_SNAKE_CASE_ : Any = laterals[i - 1].shape[2:]
SCREAMING_SNAKE_CASE_ : List[Any] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=lowercase_ , mode='''bilinear''' , align_corners=self.align_corners)
# build outputs
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1 , 0 , -1):
SCREAMING_SNAKE_CASE_ : Tuple = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners)
SCREAMING_SNAKE_CASE_ : List[Any] = torch.cat(lowercase_ , dim=1)
SCREAMING_SNAKE_CASE_ : Dict = self.fpn_bottleneck(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = self.classifier(lowercase_)
return output
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , lowercase_ : Dict , lowercase_ : int = 2 , lowercase_ : int = 3 , lowercase_ : Union[int, Tuple[int, int]] = 1):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : int = config
SCREAMING_SNAKE_CASE_ : Dict = config.auxiliary_in_channels
SCREAMING_SNAKE_CASE_ : Tuple = config.auxiliary_channels
SCREAMING_SNAKE_CASE_ : Optional[int] = config.auxiliary_num_convs
SCREAMING_SNAKE_CASE_ : Dict = config.auxiliary_concat_input
SCREAMING_SNAKE_CASE_ : Tuple = in_index
SCREAMING_SNAKE_CASE_ : int = (kernel_size // 2) * dilation
SCREAMING_SNAKE_CASE_ : List[Any] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=lowercase_ , padding=lowercase_ , dilation=lowercase_))
for i in range(self.num_convs - 1):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=lowercase_ , padding=lowercase_ , dilation=lowercase_))
if self.num_convs == 0:
SCREAMING_SNAKE_CASE_ : str = nn.Identity()
else:
SCREAMING_SNAKE_CASE_ : Tuple = nn.Sequential(*lowercase_)
if self.concat_input:
SCREAMING_SNAKE_CASE_ : Dict = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=lowercase_ , padding=kernel_size // 2)
SCREAMING_SNAKE_CASE_ : str = nn.Convad(self.channels , config.num_labels , kernel_size=1)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
self.apply(self._init_weights)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[str]):
'''simple docstring'''
if isinstance(lowercase_ , nn.Convad):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : torch.Tensor):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = encoder_hidden_states[self.in_index]
SCREAMING_SNAKE_CASE_ : Tuple = self.convs(lowercase_)
if self.concat_input:
SCREAMING_SNAKE_CASE_ : Dict = self.conv_cat(torch.cat([hidden_states, output] , dim=1))
SCREAMING_SNAKE_CASE_ : Any = self.classifier(lowercase_)
return output
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = UperNetConfig
__UpperCamelCase = "pixel_values"
__UpperCamelCase = True
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str):
'''simple docstring'''
if isinstance(lowercase_ , lowercase_):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int , lowercase_ : List[Any]=False):
'''simple docstring'''
if isinstance(lowercase_ , lowercase_):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = value
UpperCAmelCase_ : Optional[int] = r"""
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase_ : int = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : str , lowercase_ : str):
'''simple docstring'''
super().__init__(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = AutoBackbone.from_config(config.backbone_config)
# Semantic segmentation head(s)
SCREAMING_SNAKE_CASE_ : Dict = UperNetHead(lowercase_ , in_channels=self.backbone.channels)
SCREAMING_SNAKE_CASE_ : List[Any] = UperNetFCNHead(lowercase_) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length'''))
@replace_return_docstrings(output_type=lowercase_ , config_class=_CONFIG_FOR_DOC)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Optional[torch.Tensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[torch.Tensor] = None , lowercase_ : Optional[bool] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : str = output_attentions if output_attentions is not None else self.config.output_attentions
SCREAMING_SNAKE_CASE_ : Dict = self.backbone.forward_with_filtered_kwargs(
lowercase_ , output_hidden_states=lowercase_ , output_attentions=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = outputs.feature_maps
SCREAMING_SNAKE_CASE_ : int = self.decode_head(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = nn.functional.interpolate(lowercase_ , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=lowercase_)
SCREAMING_SNAKE_CASE_ : int = None
if self.auxiliary_head is not None:
SCREAMING_SNAKE_CASE_ : Any = self.auxiliary_head(lowercase_)
SCREAMING_SNAKE_CASE_ : str = nn.functional.interpolate(
lowercase_ , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=lowercase_)
SCREAMING_SNAKE_CASE_ : str = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''')
else:
# compute weighted loss
SCREAMING_SNAKE_CASE_ : List[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
SCREAMING_SNAKE_CASE_ : Dict = loss_fct(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = loss_fct(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Optional[int] = (logits,) + outputs[1:]
else:
SCREAMING_SNAKE_CASE_ : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 318 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = ["""model.decoder.embed_positions.weights"""]
def _A (__a ) -> Dict:
"""simple docstring"""
if "emb" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def _A (__a , __a ) -> Tuple[Dict, Dict]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(state_dict.keys() )
SCREAMING_SNAKE_CASE_ : int = {}
for key in keys:
SCREAMING_SNAKE_CASE_ : int = state_dict.pop(__a )
SCREAMING_SNAKE_CASE_ : int = rename_keys(__a )
if "in_proj_weight" in key:
# split fused qkv proj
SCREAMING_SNAKE_CASE_ : List[str] = val[:hidden_size, :]
SCREAMING_SNAKE_CASE_ : List[str] = val[hidden_size : 2 * hidden_size, :]
SCREAMING_SNAKE_CASE_ : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
SCREAMING_SNAKE_CASE_ : int = val
else:
SCREAMING_SNAKE_CASE_ : Any = val
return state_dict, enc_dec_proj_state_dict
def _A (__a ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
SCREAMING_SNAKE_CASE_ : Optional[int] = 10_24
SCREAMING_SNAKE_CASE_ : Tuple = 24
SCREAMING_SNAKE_CASE_ : Optional[Any] = 16
elif checkpoint == "medium":
SCREAMING_SNAKE_CASE_ : List[str] = 15_36
SCREAMING_SNAKE_CASE_ : Optional[int] = 48
SCREAMING_SNAKE_CASE_ : Optional[int] = 24
elif checkpoint == "large":
SCREAMING_SNAKE_CASE_ : Optional[Any] = 20_48
SCREAMING_SNAKE_CASE_ : Optional[int] = 48
SCREAMING_SNAKE_CASE_ : int = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
SCREAMING_SNAKE_CASE_ : List[Any] = MusicgenDecoderConfig(
hidden_size=__a , ffn_dim=hidden_size * 4 , num_hidden_layers=__a , num_attention_heads=__a , )
return config
@torch.no_grad()
def _A (__a , __a=None , __a=None , __a="cpu" ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = MusicGen.get_pretrained(__a , device=__a )
SCREAMING_SNAKE_CASE_ : Dict = decoder_config_from_checkpoint(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = fairseq_model.lm.state_dict()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rename_state_dict(
__a , hidden_size=decoder_config.hidden_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = TaEncoderModel.from_pretrained('''t5-base''' )
SCREAMING_SNAKE_CASE_ : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
SCREAMING_SNAKE_CASE_ : int = MusicgenForCausalLM(__a ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = decoder.load_state_dict(__a , strict=__a )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__a )
if len(__a ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(__a ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
SCREAMING_SNAKE_CASE_ : str = MusicgenForConditionalGeneration(text_encoder=__a , audio_encoder=__a , decoder=__a )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__a )
# check we can do a forward pass
SCREAMING_SNAKE_CASE_ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=__a , decoder_input_ids=__a ).logits
if logits.shape != (8, 1, 20_48):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''t5-base''' )
SCREAMING_SNAKE_CASE_ : str = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
SCREAMING_SNAKE_CASE_ : Tuple = MusicgenProcessor(feature_extractor=__a , tokenizer=__a )
# set the appropriate bos/pad token ids
SCREAMING_SNAKE_CASE_ : str = 20_48
SCREAMING_SNAKE_CASE_ : List[Any] = 20_48
# set other default generation config params
SCREAMING_SNAKE_CASE_ : int = int(30 * audio_encoder.config.frame_rate )
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3.0
if pytorch_dump_folder is not None:
Path(__a ).mkdir(exist_ok=__a )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(__a )
processor.push_to_hub(__a )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 318 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Union[str, Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a__ , a__ , a__ , a__):
'''simple docstring'''
a_ : Tuple = original_name.split(""".""")[0]
a_ : List[Any] = key.split(""".""")
a_ : List[Any] = int(key_list[key_list.index(a__) - 2])
a_ : Dict = int(key_list[key_list.index(a__) - 1])
a_ : Any = orig_block_num - offset
a_ : Optional[int] = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' , f'''block.{new_block_num}.{layer_num}.{new_name}''')
return key
def _UpperCAmelCase ( a__):
'''simple docstring'''
a_ : List[str] = OrderedDict()
a_ , a_ : Optional[int] = 0, 0
for key, value in state_dict.items():
if key.startswith("""network"""):
a_ : str = key.replace("""network""" , """poolformer.encoder""")
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""") and "patch_embed" not in key:
patch_emb_offset += 1
a_ : Tuple = key[: key.find("""proj""")]
a_ : Dict = key.replace(a__ , f'''patch_embeddings.{total_embed_found}.''')
a_ : Optional[Any] = key.replace("""proj""" , """projection""")
if key.endswith("""bias"""):
total_embed_found += 1
if "patch_embeddings" in key:
a_ : int = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
a_ : Union[str, Any] = replace_key_with_offset(a__ , a__ , """mlp.fc1""" , """output.conv1""")
if "mlp.fc2" in key:
a_ : str = replace_key_with_offset(a__ , a__ , """mlp.fc2""" , """output.conv2""")
if "norm1" in key:
a_ : str = replace_key_with_offset(a__ , a__ , """norm1""" , """before_norm""")
if "norm2" in key:
a_ : Any = replace_key_with_offset(a__ , a__ , """norm2""" , """after_norm""")
if "layer_scale_1" in key:
a_ : List[Any] = replace_key_with_offset(a__ , a__ , """layer_scale_1""" , """layer_scale_1""")
if "layer_scale_2" in key:
a_ : Optional[Any] = replace_key_with_offset(a__ , a__ , """layer_scale_2""" , """layer_scale_2""")
if "head" in key:
a_ : Optional[Any] = key.replace("""head""" , """classifier""")
a_ : Union[str, Any] = value
return new_state_dict
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
a_ : Any = Image.open(requests.get(a__ , stream=a__).raw)
return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path):
    '''Copy the original PoolFormer checkpoint weights into a HuggingFace model and verify the logits.'''
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = """huggingface/label-files"""
    size = model_name[-3:]
    config.num_labels = 1_0_0_0
    filename = """imagenet-1k-id2label.json"""
    expected_shape = (1, 1_0_0_0)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""") , """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 1_2, 4]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 1_8, 6]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 1_8, 6]
        config.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 2_4, 8]
        config.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'''Size {size} not supported''')
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""").pixel_values
    logger.info(f'''Converting model {model_name}...''')
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu"""))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img() , return_tensors="""pt""").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'''Size {size} not supported''')
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-2)
    # finally, save model and image processor
    logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__snake_case : Optional[int] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 248 |
from __future__ import annotations
def is_9_pandigital(n):
    '''Return True if n uses each of the digits 1 to 9 exactly once.'''
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set("""123456789""")


def solution():
    '''Return the largest 1-9 pandigital concatenated product (Project Euler 38).'''
    # a 4-digit base n concatenated with 2n is n * 100002
    for base_num in range(9_9_9_9 , 4_9_9_9 , -1):
        candidate = 1_0_0_0_0_2 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # a 3-digit base n concatenated with 2n and 3n is n * 1002003
    for base_num in range(3_3_3 , 9_9 , -1):
        candidate = 1_0_0_2_0_0_3 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 248 | 1 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl: list , wt: list , w: int , n: int) -> float:
    '''Greedy fractional knapsack: take items by value/weight ratio; the item at the capacity boundary may be split.'''
    r = sorted(zip(vl , wt) , key=lambda x : x[0] / x[1] , reverse=True)
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc , w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix: NDArray[float64] , constant_matrix: NDArray[float64] , init_val: list[int] , iterations: int , ) -> list[float]:
    '''Jacobi method: iteratively solve a strictly diagonally dominant system A x = b.'''
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
        raise ValueError(msg)
    if cols2 != 1:
        msg = f'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f'''matrix but received {len(init_val)} and {rows1}'''
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]  # diagonal coefficient
                elif col == cols - 1:
                    val = table[row][col]  # constant term of this row
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''Raise a ValueError unless each diagonal entry dominates the sum of the rest of its row.'''
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows):
        total = 0
        for j in range(0 , cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 185 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Any = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = """deta"""
__a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[Any] , UpperCamelCase : int=None , UpperCamelCase : List[str]=900 , UpperCamelCase : Any=2_048 , UpperCamelCase : int=6 , UpperCamelCase : List[Any]=2_048 , UpperCamelCase : int=8 , UpperCamelCase : Dict=6 , UpperCamelCase : str=1_024 , UpperCamelCase : Optional[int]=8 , UpperCamelCase : str=0.0 , UpperCamelCase : Tuple=True , UpperCamelCase : List[Any]="relu" , UpperCamelCase : List[Any]=256 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Dict=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : str=0.02 , UpperCamelCase : Dict=1.0 , UpperCamelCase : Tuple=True , UpperCamelCase : int=False , UpperCamelCase : Optional[Any]="sine" , UpperCamelCase : List[Any]=5 , UpperCamelCase : Tuple=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : List[Any]=True , UpperCamelCase : str=300 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=1 , UpperCamelCase : Optional[Any]=5 , UpperCamelCase : Dict=2 , UpperCamelCase : int=1 , UpperCamelCase : int=1 , UpperCamelCase : Tuple=5 , UpperCamelCase : Any=2 , UpperCamelCase : str=0.1 , UpperCamelCase : List[str]=0.25 , **UpperCamelCase : Dict , ):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__UpperCAmelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
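            # configs downloaded from the Hub arrive as plain dicts that still carry their model_type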
if isinstance(UpperCamelCase , UpperCamelCase ):
__UpperCAmelCase : Optional[int] = backbone_config.pop("""model_type""" )
__UpperCAmelCase : Any = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : Optional[Any] = config_class.from_dict(UpperCamelCase )
__UpperCAmelCase : str = backbone_config
__UpperCAmelCase : List[Any] = num_queries
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : str = d_model
__UpperCAmelCase : Optional[int] = encoder_ffn_dim
__UpperCAmelCase : Union[str, Any] = encoder_layers
__UpperCAmelCase : Optional[int] = encoder_attention_heads
__UpperCAmelCase : Optional[int] = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : Optional[int] = decoder_attention_heads
__UpperCAmelCase : Union[str, Any] = dropout
__UpperCAmelCase : List[str] = attention_dropout
__UpperCAmelCase : List[str] = activation_dropout
__UpperCAmelCase : Dict = activation_function
__UpperCAmelCase : List[str] = init_std
__UpperCAmelCase : str = init_xavier_std
__UpperCAmelCase : Tuple = encoder_layerdrop
__UpperCAmelCase : Union[str, Any] = auxiliary_loss
__UpperCAmelCase : int = position_embedding_type
# deformable attributes
__UpperCAmelCase : List[str] = num_feature_levels
__UpperCAmelCase : Tuple = encoder_n_points
__UpperCAmelCase : Union[str, Any] = decoder_n_points
__UpperCAmelCase : Dict = two_stage
__UpperCAmelCase : List[str] = two_stage_num_proposals
__UpperCAmelCase : Any = with_box_refine
__UpperCAmelCase : Any = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__UpperCAmelCase : Any = class_cost
__UpperCAmelCase : Union[str, Any] = bbox_cost
__UpperCAmelCase : List[str] = giou_cost
# Loss coefficients
__UpperCAmelCase : Dict = mask_loss_coefficient
__UpperCAmelCase : Optional[Any] = dice_loss_coefficient
__UpperCAmelCase : Tuple = bbox_loss_coefficient
__UpperCAmelCase : Optional[Any] = giou_loss_coefficient
__UpperCAmelCase : Any = eos_coefficient
__UpperCAmelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )
@property
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return self.d_model
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Tuple = copy.deepcopy(self.__dict__ )
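        # the nested backbone config must itself be serialized to a plain dict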
__UpperCAmelCase : Optional[Any] = self.backbone_config.to_dict()
__UpperCAmelCase : str = self.__class__.model_type
return output
| 115 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is the attribute name ctypes expects for a structure's layout
        _fields_ = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]


def hide_cursor():
    '''Hide the terminal cursor.'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-1_1 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        # ANSI escape sequence DECTCEM: hide the cursor
        sys.stdout.write("""\033[?25l""" )
        sys.stdout.flush()


def show_cursor():
    '''Show the terminal cursor.'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-1_1 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        # ANSI escape sequence DECTCEM: show the cursor
        sys.stdout.write("""\033[?25h""" )
        sys.stdout.flush()


@contextmanager
def hide():
    '''Context manager that hides the cursor and restores it on exit.'''
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 115 | 1 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
snake_case__ = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
snake_case__ = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
    parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
    parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
    parser.add_argument(
        '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
    parser.add_argument(
        '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
    parser.add_argument(
        '--na-prob-thresh' , '-t' , type=float , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
    parser.add_argument(
        '--out-image-dir' , '-p' , metavar='out_images' , default=None , help='Save precision-recall curves to directory.' )
    parser.add_argument('--verbose' , '-v' , action='store_true' )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'] )
    return qid_to_has_ans


def normalize_answer(s):
    '''Lower text and remove punctuation, articles and extra whitespace.'''

    def remove_articles(text):
        return ARTICLES_REGEX.sub(' ' , text )

    def white_space_fix(text):
        return " ".join(text.split() )

    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s ).split()


def compute_exact(a_gold , a_pred):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )


def compute_fa(a_gold , a_pred):
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset , preds):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(f"""Missing prediction for {qid}""" )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_a : int = {}
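    # predictions whose no-answer probability exceeds the threshold are scored as abstentions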
for qid, s in scores.items():
_a : Union[str, Any] = na_probs[qid] > na_prob_thresh
if pred_na:
_a : Tuple = float(not qid_to_has_ans[qid] )
else:
_a : Union[str, Any] = s
return new_scores
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Dict:
if not qid_list:
_a : Optional[int] = len(lowerCAmelCase_ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
_a : List[Any] = len(lowerCAmelCase_ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
for k in new_eval:
_a : Optional[Any] = new_eval[k]
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
plt.step(lowerCAmelCase_ , lowerCAmelCase_ , color='b' , alpha=0.2 , where='post' )
plt.fill_between(lowerCAmelCase_ , lowerCAmelCase_ , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCAmelCase_ )
plt.savefig(lowerCAmelCase_ )
plt.clf()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Optional[int]:
_a : Union[str, Any] = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : na_probs[k] )
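    # sweep the questions from most to least confident to trace out the precision-recall curve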
_a : Any = 0.0
_a : List[Any] = 1.0
_a : int = 0.0
_a : Tuple = [1.0]
_a : Dict = [0.0]
_a : Any = 0.0
for i, qid in enumerate(lowerCAmelCase_ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_a : int = true_pos / float(i + 1 )
_a : List[str] = true_pos / float(lowerCAmelCase_ )
if i == len(lowerCAmelCase_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase_ )
recalls.append(lowerCAmelCase_ )
if out_image:
plot_pr_curve(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return {"ap": 100.0 * avg_prec}
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
if out_image_dir and not os.path.exists(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
_a : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_a : Tuple = make_precision_recall_eval(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , out_image=os.path.join(lowerCAmelCase_ , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
_a : int = make_precision_recall_eval(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , out_image=os.path.join(lowerCAmelCase_ , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
_a : Optional[Any] = {k: float(lowerCAmelCase_ ) for k, v in qid_to_has_ans.items()}
_a : Dict = make_precision_recall_eval(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , out_image=os.path.join(lowerCAmelCase_ , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , 'pr_exact' )
merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , 'pr_f1' )
merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , 'pr_oracle' )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
if not qid_list:
return
_a : Optional[Any] = [na_probs[k] for k in qid_list]
_a : Any = np.ones_like(lowerCAmelCase_ ) / float(len(lowerCAmelCase_ ) )
plt.hist(lowerCAmelCase_ , weights=lowerCAmelCase_ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(lowerCAmelCase_ , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_a : List[Any] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_a : Union[str, Any] = num_no_ans
_a : Optional[Any] = cur_score
_a : List[str] = 0.0
_a : Any = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : na_probs[k] )
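    # scan qids in order of increasing no-answer probability, keeping the best cumulative score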
for i, qid in enumerate(lowerCAmelCase_ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_a : Tuple = scores[qid]
else:
if preds[qid]:
_a : int = -1
else:
_a : Tuple = 0
cur_score += diff
if cur_score > best_score:
_a : List[Any] = cur_score
_a : Tuple = na_probs[qid]
return 100.0 * best_score / len(lowerCAmelCase_ ), best_thresh
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_a , _a : int = find_best_thresh(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_a , _a : int = find_best_thresh(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_a : Union[str, Any] = best_exact
_a : List[Any] = exact_thresh
_a : Dict = best_fa
_a : Union[str, Any] = fa_thresh
def __lowerCamelCase ( ) -> List[str]:
with open(OPTS.data_file ) as f:
_a : str = json.load(lowerCAmelCase_ )
_a : Optional[Any] = dataset_json['data']
with open(OPTS.pred_file ) as f:
_a : List[Any] = json.load(lowerCAmelCase_ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_a : int = json.load(lowerCAmelCase_ )
else:
_a : Optional[Any] = {k: 0.0 for k in preds}
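    # without a no-answer probability file, treat every prediction as a confident answer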
_a : int = make_qid_to_has_ans(lowerCAmelCase_ ) # maps qid to True/False
_a : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if v]
_a : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
_a , _a : Optional[Any] = get_raw_scores(lowerCAmelCase_ , lowerCAmelCase_ )
_a : int = apply_no_ans_threshold(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.na_prob_thresh )
_a : List[Any] = apply_no_ans_threshold(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.na_prob_thresh )
_a : Optional[int] = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ )
if has_ans_qids:
_a : List[Any] = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ , qid_list=lowerCAmelCase_ )
merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , 'HasAns' )
if no_ans_qids:
_a : Dict = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ , qid_list=lowerCAmelCase_ )
merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir )
histogram_na_prob(lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
else:
print(json.dumps(lowerCAmelCase_ , indent=2 ) )
if __name__ == "__main__":
snake_case__ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 371 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
set_seed(770)
__lowerCAmelCase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
__lowerCAmelCase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
__lowerCAmelCase = os.path.dirname(os.path.abspath(__file__))
__lowerCAmelCase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
__lowerCAmelCase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> Optional[int]:
_a : int = model_type
if use_small:
key += "_small"
return os.path.join(lowerCAmelCase_ , REMOTE_MODEL_PATHS[key]['file_name'] )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
hf_hub_download(repo_id=lowerCAmelCase_ , filename=lowerCAmelCase_ , local_dir=lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_="text" ) -> List[str]:
if model_type == "text":
_a : List[str] = BarkSemanticModel
_a : Optional[Any] = BarkSemanticConfig
_a : Any = BarkSemanticGenerationConfig
elif model_type == "coarse":
_a : Tuple = BarkCoarseModel
_a : str = BarkCoarseConfig
_a : str = BarkCoarseGenerationConfig
elif model_type == "fine":
_a : List[str] = BarkFineModel
_a : Optional[Any] = BarkFineConfig
_a : str = BarkFineGenerationConfig
else:
raise NotImplementedError()
_a : Dict = f"""{model_type}_small""" if use_small else model_type
_a : Union[str, Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCAmelCase_ ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info['repo_id'] , model_info['file_name'] )
_a : int = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
# this is a hack
_a : List[Any] = checkpoint['model_args']
if "input_vocab_size" not in model_args:
_a : Dict = model_args['vocab_size']
_a : Dict = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_a : List[Any] = model_args.pop('n_head' )
_a : Any = model_args.pop('n_embd' )
_a : List[Any] = model_args.pop('n_layer' )
_a : Optional[int] = ConfigClass(**checkpoint['model_args'] )
_a : List[str] = ModelClass(config=lowerCAmelCase_ )
_a : Tuple = GenerationConfigClass()
_a : Optional[Any] = model_generation_config
_a : Optional[Any] = checkpoint['model']
# fixup checkpoint
_a : int = '_orig_mod.'
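    # torch.compile wraps modules and prefixes parameter names with "_orig_mod."; strip it before renaming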
for k, v in list(state_dict.items() ):
if k.startswith(lowerCAmelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
_a : str = k[len(lowerCAmelCase_ ) :]
for old_layer_name in new_layer_name_dict:
_a : List[Any] = new_k.replace(lowerCAmelCase_ , new_layer_name_dict[old_layer_name] )
_a : List[Any] = state_dict.pop(lowerCAmelCase_ )
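    # ".attn.bias" entries are causal-mask buffers rather than learned weights, so they may differ freely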
_a : List[Any] = set(state_dict.keys() ) - set(model.state_dict().keys() )
_a : Tuple = {k for k in extra_keys if not k.endswith('.attn.bias' )}
_a : Tuple = set(model.state_dict().keys() ) - set(state_dict.keys() )
_a : Optional[Any] = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(lowerCAmelCase_ ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(lowerCAmelCase_ ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
_a : Dict = model.num_parameters(exclude_embeddings=lowerCAmelCase_ )
_a : Tuple = checkpoint['best_val_loss'].item()
logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowerCAmelCase_ , 3 )} loss""" )
model.eval()
model.to(lowerCAmelCase_ )
del checkpoint, state_dict
return model
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_="text" ) -> List[Any]:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_a : Optional[int] = 'cpu' # do conversion on cpu
_a : Tuple = _get_ckpt_path(lowerCAmelCase_ , use_small=lowerCAmelCase_ )
_a : List[Any] = _load_model(lowerCAmelCase_ , lowerCAmelCase_ , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
# load bark initial model
_a : Any = _bark_load_model(lowerCAmelCase_ , 'cpu' , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
if model_type == "text":
_a : int = bark_model['model']
if model.num_parameters(exclude_embeddings=lowerCAmelCase_ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
_a : Any = 5
_a : List[str] = 10
if model_type in ["text", "coarse"]:
_a : Dict = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
_a : Dict = bark_model(lowerCAmelCase_ )[0]
_a : Tuple = model(lowerCAmelCase_ )
# take last logits
_a : Optional[int] = output_new_model_total.logits[:, [-1], :]
else:
_a : List[str] = 3
_a : List[Any] = 8
_a : Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_a : Union[str, Any] = model(lowerCAmelCase_ , lowerCAmelCase_ )
_a : int = bark_model(lowerCAmelCase_ , lowerCAmelCase_ )
_a : List[str] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Any:
_a : Any = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
_a : int = BarkSemanticConfig.from_pretrained(os.path.join(lowerCAmelCase_ , 'config.json' ) )
_a : Any = BarkCoarseConfig.from_pretrained(os.path.join(lowerCAmelCase_ , 'config.json' ) )
_a : List[Any] = BarkFineConfig.from_pretrained(os.path.join(lowerCAmelCase_ , 'config.json' ) )
_a : List[str] = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
_a : str = BarkSemanticModel.from_pretrained(lowerCAmelCase_ )
_a : Dict = BarkCoarseModel.from_pretrained(lowerCAmelCase_ )
_a : int = BarkFineModel.from_pretrained(lowerCAmelCase_ )
_a : List[Any] = EncodecModel.from_pretrained('facebook/encodec_24khz' )
_a : Any = BarkConfig.from_sub_model_configs(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_a : List[str] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_a : Optional[Any] = BarkModel(lowerCAmelCase_ )
_a : List[str] = semantic
_a : Union[str, Any] = coarseAcoustic
_a : Optional[int] = fineAcoustic
_a : Optional[Any] = codec
_a : List[Any] = bark_generation_config
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
bark.save_pretrained(lowerCAmelCase_ , repo_id=lowerCAmelCase_ , push_to_hub=lowerCAmelCase_ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
__lowerCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 107 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True , *args , **kwargs):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        # only the local main process should render the progress bar
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
| 26 |
"""simple docstring"""
def fizz_buzz(number: int , iterations: int) -> str:
    '''Play FizzBuzz starting at `number` for up to `iterations` and return the output string.'''
    if not isinstance(iterations , int):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
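# torch-dependent classes are only registered when torch is actually installed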
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_a = 'bert-base-cased'
_a = 'google/pegasus-xsum'
_a = [' Sam ate lunch today.', 'Sams lunch ingredients.']
_a = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
_a = 'patrickvonplaten/t5-tiny-random'
_a = 'sshleifer/bart-tiny-random'
_a = 'sshleifer/tiny-mbart'
_a = 'sshleifer/tiny-marian-en-de'
def _dump_articles(path: Path , articles: list) -> None:
    '''Write one article per line to `path`.'''
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F"""{split}.source""") , ARTICLES)
        _dump_articles(os.path.join(tmp_dir , F"""{split}.target""") , SUMMARIES)
    return tmp_dir
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
], )
@slow
def _lowercase ( self : List[Any], UpperCAmelCase__ : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
__lowercase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowercase = max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in ARTICLES )
__lowercase = max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in SUMMARIES )
__lowercase = 4
__lowercase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__lowercase ,__lowercase = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=UpperCAmelCase__, type_path="train", max_source_length=UpperCAmelCase__, max_target_length=UpperCAmelCase__, src_lang=UpperCAmelCase__, tgt_lang=UpperCAmelCase__, )
__lowercase = DataLoader(UpperCAmelCase__, batch_size=2, collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(UpperCAmelCase__, UpperCAmelCase__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__lowercase = shift_tokens_right(batch["labels"], tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def _lowercase ( self : Dict, UpperCAmelCase__ : int ):
__lowercase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
__lowercase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowercase = max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in ARTICLES )
__lowercase = max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in SUMMARIES )
__lowercase = 4
__lowercase = LegacySeqaSeqDataset(
UpperCAmelCase__, data_dir=UpperCAmelCase__, type_path="train", max_source_length=2_0, max_target_length=UpperCAmelCase__, )
__lowercase = DataLoader(UpperCAmelCase__, batch_size=2, collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def _lowercase ( self : str ):
__lowercase = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
__lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__lowercase = tmp_dir.joinpath("train.source" ).open().readlines()
__lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(UpperCAmelCase__, UpperCAmelCase__, 1_2_8, UpperCAmelCase__ )
__lowercase = {x.name for x in tmp_dir.iterdir()}
__lowercase = {x.name for x in save_dir.iterdir()}
__lowercase = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(UpperCAmelCase__ ) < len(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == 1
assert len(packed_examples[0] ) == sum(len(UpperCAmelCase__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq" )
def _lowercase ( self : List[str] ):
if not FAIRSEQ_AVAILABLE:
return
__lowercase ,__lowercase ,__lowercase = self._get_dataset(max_len=6_4 )
__lowercase = 6_4
__lowercase = ds.make_dynamic_sampler(UpperCAmelCase__, required_batch_size_multiple=UpperCAmelCase__ )
__lowercase = [len(UpperCAmelCase__ ) for x in batch_sampler]
assert len(set(UpperCAmelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) # no dropped or added examples
__lowercase = DataLoader(UpperCAmelCase__, batch_sampler=UpperCAmelCase__, collate_fn=ds.collate_fn, num_workers=2 )
__lowercase = []
__lowercase = []
for batch in data_loader:
__lowercase = batch["input_ids"].shape
__lowercase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__lowercase = np.product(batch["input_ids"].shape )
num_src_per_batch.append(UpperCAmelCase__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(UpperCAmelCase__ )
assert num_src_per_batch[0] == max(UpperCAmelCase__ )
if failures:
raise AssertionError(F"""too many tokens in {len(UpperCAmelCase__ )} batches""" )
def _lowercase ( self : Any ):
__lowercase ,__lowercase ,__lowercase = self._get_dataset(max_len=5_1_2 )
__lowercase = 2
__lowercase = ds.make_sortish_sampler(UpperCAmelCase__, shuffle=UpperCAmelCase__ )
__lowercase = DataLoader(UpperCAmelCase__, batch_size=UpperCAmelCase__, collate_fn=ds.collate_fn, num_workers=2 )
__lowercase = DataLoader(UpperCAmelCase__, batch_size=UpperCAmelCase__, collate_fn=ds.collate_fn, num_workers=2, sampler=UpperCAmelCase__ )
__lowercase = tokenizer.pad_token_id
def count_pad_tokens(UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any]="input_ids" ):
return [batch[k].eq(UpperCAmelCase__ ).sum().item() for batch in data_loader]
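        # sortish batching groups similarly sized examples, so it should produce fewer pad tokens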
assert sum(count_pad_tokens(UpperCAmelCase__, k="labels" ) ) < sum(count_pad_tokens(UpperCAmelCase__, k="labels" ) )
assert sum(count_pad_tokens(UpperCAmelCase__ ) ) < sum(count_pad_tokens(UpperCAmelCase__ ) )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
def _lowercase ( self : List[Any], UpperCAmelCase__ : int=1_0_0_0, UpperCAmelCase__ : str=1_2_8 ):
if os.getenv("USE_REAL_DATA", UpperCAmelCase__ ):
__lowercase = "examples/seq2seq/wmt_en_ro"
__lowercase = max_len * 2 * 6_4
if not Path(UpperCAmelCase__ ).joinpath("train.len" ).exists():
save_len_file(UpperCAmelCase__, UpperCAmelCase__ )
else:
__lowercase = "examples/seq2seq/test_data/wmt_en_ro"
__lowercase = max_len * 4
save_len_file(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=UpperCAmelCase__, type_path="train", max_source_length=UpperCAmelCase__, max_target_length=UpperCAmelCase__, n_obs=UpperCAmelCase__, )
return ds, max_tokens, tokenizer
def _lowercase ( self : Union[str, Any] ):
__lowercase ,__lowercase ,__lowercase = self._get_dataset()
__lowercase = set(DistributedSortishSampler(UpperCAmelCase__, 2_5_6, num_replicas=2, rank=0, add_extra_examples=UpperCAmelCase__ ) )
__lowercase = set(DistributedSortishSampler(UpperCAmelCase__, 2_5_6, num_replicas=2, rank=1, add_extra_examples=UpperCAmelCase__ ) )
assert idsa.intersection(UpperCAmelCase__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
], )
def _lowercase ( self : int, UpperCAmelCase__ : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(UpperCAmelCase__, use_fast=UpperCAmelCase__ )
if tok_name == MBART_TINY:
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR", )
__lowercase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ), type_path="train", max_source_length=4, max_target_length=8, )
__lowercase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(UpperCAmelCase__ ) == 1 if tok_name == BART_TINY else len(UpperCAmelCase__ ) == 0
| 144 | 1 |