# Tests for transformers' DisjunctiveConstraint (constrained generation).
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # `dc.token_ids` is a list of integer lists; the constraint may only be
        # initialized from (nested) lists of ints, never from tensors.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One alternative must not be a strict prefix of another: after generating
        # [1, 2] it would be ambiguous whether the constraint is already fulfilled.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
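For context, a minimal usage sketch of disjunctive constraints at generation time (not part of the test file above; the model name and prompt are placeholders). In recent transformers releases, a nested list passed via `force_words_ids` is interpreted disjunctively, which `generate` backs with `DisjunctiveConstraint` internally:

# Minimal sketch, assuming a recent transformers release; model and prompt are placeholders.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# A nested list means: at least one of these tokenizations must appear in the output.
force_words_ids = [tokenizer(["walked", "ran"], add_special_tokens=False).input_ids]

inputs = tokenizer("translate English to German: The dog ran home.", return_tensors="pt")
# Constrained decoding requires beam search, hence num_beams > 1.
outputs = model.generate(**inputs, force_words_ids=force_words_ids, num_beams=5)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))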
# Script to convert an original latent-diffusion (LDM) UNet checkpoint to the diffusers format.
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})

    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Assigns the weights listed in `paths` to `checkpoint`, splitting fused qkv attention layers if needed."""
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Converts an LDM UNet state dict to the diffusers naming scheme."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            paths = renew_resnet_paths(resnets)
            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
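A hypothetical invocation of the script above; the script, file, and directory names are placeholders, not taken from the source:

# python convert_ldm_original_checkpoint_to_diffusers.py \
#     --checkpoint_path ./ldm/model.ckpt \
#     --config_file ./ldm/config.json \
#     --dump_path ./ldm-diffusers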
# `datasets` metric for the MATH (competition_math) benchmark.
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks
    and Collin Burns
    and Saurav Kadavath
    and Akul Arora
    and Steven Basart
    and Eric Tang
    and Dawn Song
    and Jacob Steinhardt},
  journal={arXiv preprint arXiv:2103.03874},
  year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the fraction of predictions equivalent to their reference."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
# Configuration class for DPR (Dense Passage Retrieval) models.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
# Convert a saved PyTorch state dict to float16.
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Casts every tensor in a saved state dict to fp16 and writes it back to disk."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
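Because the converter is exposed through `fire.Fire`, positional and keyword CLI arguments map directly onto the function signature. A hypothetical invocation (the script and file names are placeholders):

# python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path overwrites the input file in place.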
# Knowledge-distillation training launcher (DistilBERT-style distillation).
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch

from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    """Sanity checks on the arguments: student/teacher types must be compatible,
    loss weights must be non-negative, and the required files must exist."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    """Freezes the positional embeddings of the student."""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    """Freezes the token type embeddings of the student."""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
# Fast tokenizer for SqueezeBERT.
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if its saved state disagrees with the
        # arguments passed to this tokenizer.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Builds [CLS] A [SEP] or [CLS] A [SEP] B [SEP] sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Creates the token type ids mask (0 for the first segment, 1 for the second)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
# Processor for CLIPSeg: wraps a ViT image processor and a CLIP tokenizer.
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
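A minimal usage sketch for the processor above (assumes network access to the Hugging Face Hub; the checkpoint id and image URL are illustrative):

from PIL import Image
import requests
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Text prompts paired with (copies of) an image: the processor returns both
# `input_ids` and `pixel_values`, ready for the CLIPSeg model.
inputs = processor(text=["a cat", "a remote"], images=[image] * 2, padding=True, return_tensors="pt")
print(sorted(inputs.keys()))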
# Project Euler problem 493: expected number of distinct colours when drawing
# 20 balls from an urn of 70 (10 balls in each of 7 colours).
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picks: int = 20) -> str:
    """Returns the expected number of distinct colours, to nine decimal places."""
    total = math.comb(NUM_BALLS, num_picks)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
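The closed form follows from linearity of expectation: the expected number of distinct colours equals NUM_COLOURS times the probability that one fixed colour appears at least once among the 20 picks. A quick independent check of the arithmetic, using the same constants as above:

import math

# P(a fixed colour is entirely missing from the 20 picks) = C(70 - 10, 20) / C(70, 20)
expected = 7 * (1 - math.comb(60, 20) / math.comb(70, 20))
print(f"{expected:.9f}")  # matches solution(20)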
# Lazy import structure for the MaskFormer model family.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# Tests for SamProcessor (PyTorch, TensorFlow, and cross-framework equivalence).
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_mask = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_mask)]
        pt_dummy_masks = [torch.tensor(dummy_mask)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 16
_lowercase : Dict = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
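# Scale the loss down by the number of accumulation steps so the summed gradients match a full-batch update.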
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=A , default='''logs''' , help='''Where to store experiment tracking logs and relevant project information''' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
| 50
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Tuple = {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[Any] = "speech_to_text"
__magic_name__ : int = ["past_key_values"]
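# Map generic config attribute names onto this model's own parameter names.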
__magic_name__ : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , lowerCAmelCase : int=10000 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Optional[int]=2048 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : List[str]=2048 , lowerCAmelCase : str=4 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : str=True , lowerCAmelCase : Optional[Any]="relu" , lowerCAmelCase : Optional[Any]=256 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : str=0.0 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : Dict=2 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=0 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Dict=6000 , lowerCAmelCase : Optional[int]=1024 , lowerCAmelCase : Any=2 , lowerCAmelCase : int=(5, 5) , lowerCAmelCase : Dict=1024 , lowerCAmelCase : List[str]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : Any , )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = d_model
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase = max_source_positions
UpperCAmelCase = max_target_positions
UpperCAmelCase = num_conv_layers
UpperCAmelCase = list(lowerCAmelCase )
UpperCAmelCase = conv_channels
UpperCAmelCase = input_feat_per_channel
UpperCAmelCase = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : int = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase )
class UpperCamelCase__( lowerCAmelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__magic_name__ : str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
__magic_name__ : ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
__magic_name__ : ClassVar[Features] = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
__magic_name__ : str = "question"
__magic_name__ : str = "context"
__magic_name__ : str = "answers"
@property
def a__( self : Optional[int] )-> Dict[str, str]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 712
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
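# Worked example with the defaults above (sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2]):
# hop_length = 8 * 5 * 4 * 2 = 320, so frame_rate = ceil(24000 / 320) = 75 frames per second.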
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 50
| 0
|
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
_lowercase : Dict = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
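# The build is considered complete only if every custom non-Python source file (CUDA kernels, C++ ops, Cython modules) made it into the package.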
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
_lowercase : int = parser.parse_args()
if args.check_lib:
_lowercase : List[Any] = importlib.import_module("""transformers""")
_lowercase : Dict = Path(transformers_module.__file__).parent
else:
_lowercase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 713
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Any = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Dict = "beit"
def __init__( self : List[Any] , lowerCAmelCase : str=8192 , lowerCAmelCase : Dict=768 , lowerCAmelCase : int=12 , lowerCAmelCase : Optional[Any]=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Dict="gelu" , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Optional[Any]=0.02 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Optional[Any]=224 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : str=3 , lowerCAmelCase : str=False , lowerCAmelCase : str=False , lowerCAmelCase : int=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : int=True , lowerCAmelCase : Any=[3, 5, 7, 11] , lowerCAmelCase : Union[str, Any]=[1, 2, 3, 6] , lowerCAmelCase : List[str]=True , lowerCAmelCase : str=0.4 , lowerCAmelCase : Optional[Any]=256 , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : Any=False , lowerCAmelCase : Any=255 , **lowerCAmelCase : Optional[Any] , )-> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = use_mask_token
UpperCAmelCase = use_absolute_position_embeddings
UpperCAmelCase = use_relative_position_bias
UpperCAmelCase = use_shared_relative_position_bias
UpperCAmelCase = layer_scale_init_value
UpperCAmelCase = drop_path_rate
UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
UpperCAmelCase = out_indices
UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
UpperCAmelCase = use_auxiliary_head
UpperCAmelCase = auxiliary_loss_weight
UpperCAmelCase = auxiliary_channels
UpperCAmelCase = auxiliary_num_convs
UpperCAmelCase = auxiliary_concat_input
UpperCAmelCase = semantic_loss_ignore_index
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Dict = version.parse("1.11" )
@property
def a__( self : str )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def a__( self : Optional[int] )-> float:
"""simple docstring"""
return 1E-4
| 714
|
'''simple docstring'''
import heapq
def lowerCamelCase__ ( A : dict ):
'''simple docstring'''
UpperCAmelCase = []
# for each node and its adjacency list, push the node together with its rank onto the queue
# using the heapq module, the queue behaves like a priority queue
# heapq implements a min-priority queue, so -1 * len(value) is used to simulate max-priority ordering
for key, value in graph.items():
# O(log(n))
heapq.heappush(A , [-1 * len(value ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the highest-ranked remaining node)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase = heapq.heappop(A )[1][0]
chosen_vertices.add(A )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
UpperCAmelCase = elem[1][1].index(A )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(A )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 50
| 0
|
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_lowercase : List[str] = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_lowercase : str = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_lowercase : List[str] = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
def a__( self : Tuple )-> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def a__( self : int , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Any=False , lowerCAmelCase : Optional[Any]=False , )-> Optional[Any]:
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
UpperCAmelCase = np.array([re.sub(lowerCAmelCase , '''''' , lowerCAmelCase ) for x in predictions] )
UpperCAmelCase = np.array([re.sub(lowerCAmelCase , '''''' , lowerCAmelCase ) for x in references] )
else:
UpperCAmelCase = np.asarray(lowerCAmelCase )
UpperCAmelCase = np.asarray(lowerCAmelCase )
if ignore_case:
UpperCAmelCase = np.char.lower(lowerCAmelCase )
UpperCAmelCase = np.char.lower(lowerCAmelCase )
if ignore_punctuation:
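# str.maketrans with a third argument builds a translation table that deletes every punctuation character.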
UpperCAmelCase = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
UpperCAmelCase = np.char.translate(lowerCAmelCase , table=lowerCAmelCase )
UpperCAmelCase = np.char.translate(lowerCAmelCase , table=lowerCAmelCase )
if ignore_numbers:
UpperCAmelCase = string.digits.maketrans('''''' , '''''' , string.digits )
UpperCAmelCase = np.char.translate(lowerCAmelCase , table=lowerCAmelCase )
UpperCAmelCase = np.char.translate(lowerCAmelCase , table=lowerCAmelCase )
UpperCAmelCase = predictions == references
return {"exact_match": np.mean(lowerCAmelCase ) * 100}
| 715
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowercase : Optional[int] = """examples/"""
_lowercase : str = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowercase : Dict = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowercase : List[Any] = """README.md"""
def lowerCamelCase__ ( A : int , A : str , A : Optional[Any] ):
'''simple docstring'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.read()
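# Each REPLACE_PATTERNS entry is a (compiled regex, replacement template) pair; "VERSION" in the template is substituted with the new version string below.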
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace('''VERSION''' , A )
UpperCAmelCase = re_pattern.sub(A , A )
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(A )
def lowerCamelCase__ ( A : Optional[int] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(A , A ) , A , pattern='''examples''' )
def lowerCamelCase__ ( A : str , A : Dict=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A , A , A )
if not patch:
update_version_in_examples(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(A ).groups()[0]
return packaging.version.parse(A )
def lowerCamelCase__ ( A : Tuple=False ):
'''simple docstring'''
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(A ) == 0:
UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(A , patch=A )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = get_version()
UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(A ) == 0:
UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(A )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowercase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 50
| 0
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> Any:
"""simple docstring"""
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCAmelCase )
UpperCAmelCase = -1
UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase = TextStreamer(lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase = cs.out[:-1]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCAmelCase )
UpperCAmelCase = -1
UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase = TextIteratorStreamer(lowerCAmelCase )
UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
UpperCAmelCase = Thread(target=model.generate , kwargs=lowerCAmelCase )
thread.start()
UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def a__( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCAmelCase )
UpperCAmelCase = -1
UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase = TextStreamer(lowerCAmelCase , skip_prompt=lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase = cs.out[:-1]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(lowerCAmelCase )
UpperCAmelCase = -1
UpperCAmelCase = torch.ones((1, 5) , device=lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase = TextStreamer(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=1 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase = tokenizer(lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCAmelCase )
UpperCAmelCase = -1
UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
UpperCAmelCase = TextIteratorStreamer(lowerCAmelCase , timeout=0.001 )
UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
UpperCAmelCase = Thread(target=model.generate , kwargs=lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase ):
UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
| 716
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 250
UpperCAmelCase = ids_tensor((batch_size, length) , lowerCAmelCase )
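# Uniform dummy scores (probability 1/length per token); the criteria under test only inspect sequence length and elapsed time.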
UpperCAmelCase = torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
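# Backdating initial_timestamp by 0.2s means the 0.1s budget is already exhausted, so the criterion fires immediately.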
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : int )-> Any:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
| 50
| 0
|
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_lowercase : Optional[int] = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
_lowercase : Optional[Any] = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
_lowercase : Tuple = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
_lowercase : List[Any] = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
_lowercase : Tuple = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def lowerCamelCase__ ( A : str , A : int ):
'''simple docstring'''
for tf_name, hf_name in patterns:
UpperCAmelCase = k.replace(A , A )
return k
def lowerCamelCase__ ( A : dict , A : dict ):
'''simple docstring'''
UpperCAmelCase = BigBirdPegasusConfig(**A )
UpperCAmelCase = BigBirdPegasusForConditionalGeneration(A )
UpperCAmelCase = torch_model.state_dict()
UpperCAmelCase = {}
# separating decoder weights
UpperCAmelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCAmelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
UpperCAmelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(A ):
continue
UpperCAmelCase = DECODER_PATTERNS
UpperCAmelCase = rename_state_dict_key(A , A )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCAmelCase = v.T
UpperCAmelCase = torch.from_numpy(A )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
UpperCAmelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(A ):
continue
UpperCAmelCase = REMAINING_PATTERNS
UpperCAmelCase = rename_state_dict_key(A , A )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCAmelCase = v.T
UpperCAmelCase = torch.from_numpy(A )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
UpperCAmelCase = mapping['''model.embed_positions.weight''']
UpperCAmelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCAmelCase , UpperCAmelCase = torch_model.load_state_dict(A , strict=A )
UpperCAmelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
UpperCAmelCase = tf.train.list_variables(A )
UpperCAmelCase = {}
UpperCAmelCase = ['''global_step''']
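# "global_step" is a training counter rather than a model weight, so it is skipped during conversion.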
for name, shape in tqdm(A , desc='''converting tf checkpoint to dict''' ):
UpperCAmelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase = tf.train.load_variable(A , A )
UpperCAmelCase = array
return tf_weights
def lowerCamelCase__ ( A : str , A : str , A : dict ):
'''simple docstring'''
UpperCAmelCase = get_tf_weights_as_numpy(A )
UpperCAmelCase = convert_bigbird_pegasus(A , A )
torch_model.save_pretrained(A )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
_lowercase : Union[str, Any] = parser.parse_args()
_lowercase : Dict = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 717
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : List[str] = ["note_seq"]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 50
| 0
|
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_lowercase : Optional[int] = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : bool , lowerCAmelCase : str = None , lowerCAmelCase : list = None )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(lowerCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
UpperCAmelCase = os.path.join(lowerCAmelCase , lowerCAmelCase )
if os.path.isfile(lowerCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=lowerCAmelCase , feature_script=lowerCAmelCase , tested_section='''main()''' if parser_only else '''training_function()''' , ):
UpperCAmelCase = compare_against_test(
os.path.join(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = '''\n'''.join(lowerCAmelCase )
if special_strings is not None:
for string in special_strings:
UpperCAmelCase = diff.replace(lowerCAmelCase , '''''' )
self.assertEqual(lowerCAmelCase , '''''' )
def a__( self : Tuple )-> List[Any]:
"""simple docstring"""
self.one_complete_example('''complete_nlp_example.py''' , lowerCAmelCase )
self.one_complete_example('''complete_nlp_example.py''' , lowerCAmelCase )
def a__( self : Optional[int] )-> Dict:
"""simple docstring"""
UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
UpperCAmelCase = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.one_complete_example('''complete_cv_example.py''' , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[Any] = False
@classmethod
def a__( cls : int )-> int:
"""simple docstring"""
super().setUpClass()
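# Write a minimal accelerate config once for the whole test class; every test then launches its example script with `accelerate launch --config_file <path>`.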
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def a__( cls : Optional[Any] )-> List[Any]:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def a__( self : Any )-> Dict:
"""simple docstring"""
UpperCAmelCase = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase )
self.assertNotIn('''epoch 0:''' , lowerCAmelCase )
self.assertIn('''epoch 1:''' , lowerCAmelCase )
def a__( self : Tuple )-> int:
"""simple docstring"""
UpperCAmelCase = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase )
if torch.cuda.is_available():
UpperCAmelCase = torch.cuda.device_count()
else:
UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , lowerCAmelCase )
self.assertIn('''epoch 1:''' , lowerCAmelCase )
else:
self.assertIn('''epoch 0:''' , lowerCAmelCase )
self.assertIn('''epoch 1:''' , lowerCAmelCase )
@slow
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase )
UpperCAmelCase = re.findall('''({.+})''' , lowerCAmelCase )
UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
UpperCAmelCase = ast.literal_eval(lowerCAmelCase )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def a__( self : List[str] )-> Dict:
"""simple docstring"""
UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def a__( self : Tuple )-> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
UpperCAmelCase = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase , '''tracking''' ) ) )
def a__( self : Any )-> List[str]:
"""simple docstring"""
UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 718
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( A : List[Any] , A : int , A : List[str] , A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = BigBirdConfig.from_json_file(A )
print(f"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
UpperCAmelCase = BigBirdForQuestionAnswering(A )
else:
UpperCAmelCase = BigBirdForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(A , A , is_trivia_qa=A )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(A )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
_lowercase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 50
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_lowercase : str = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "ernie_m"
__magic_name__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Any , lowerCAmelCase : int = 250002 , lowerCAmelCase : int = 768 , lowerCAmelCase : int = 12 , lowerCAmelCase : int = 12 , lowerCAmelCase : int = 3072 , lowerCAmelCase : str = "gelu" , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : int = 514 , lowerCAmelCase : float = 0.02 , lowerCAmelCase : int = 1 , lowerCAmelCase : float = 1E-05 , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[Any]=False , lowerCAmelCase : List[str]=0.0 , **lowerCAmelCase : List[str] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = classifier_dropout
UpperCAmelCase = is_decoder
UpperCAmelCase = act_dropout
| 719
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase ) )
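# token_ids must be plain (possibly nested) Python lists of ints; torch tensors at any nesting level are rejected below.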
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
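# [1, 2] is a strict prefix of [1, 2, 3, 4]; one branch being a subset of another makes the disjunction ambiguous, so construction must fail.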
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(lowerCAmelCase ) # fails here
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(3 )
UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 50
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
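# --- Editor's usage sketch (not part of the original file) ---
# CvtConfig describes a three-stage CvT architecture, so every per-stage
# argument is a list of equal length.  A minimal sketch, assuming the class
# restored above:
#
#   config = CvtConfig(depth=[1, 2, 10], num_heads=[1, 3, 6])
#   assert len(config.depth) == len(config.num_heads) == len(config.embed_dim)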
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only"
                " if you've reviewed the code as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
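# --- Editor's usage sketch (not part of the original file) ---
# Assuming this command is registered under the `transformers-cli` entry point
# (as it is in the transformers repo), it is invoked as:
#
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased
#
# run() simply instantiates AutoModel and AutoTokenizer, which populates the cache.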
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
def a__( self : Tuple )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = 8
# DPR tok
UpperCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
UpperCAmelCase = os.path.join(lowerCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
UpperCAmelCase = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase = {'''unk_token''': '''<unk>'''}
UpperCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
UpperCAmelCase = os.path.join(lowerCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(lowerCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase ) )
def a__( self : List[Any] )-> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def a__( self : int )-> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def a__( self : str )-> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__( self : List[str] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def a__( self : str )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.get_dummy_dataset()
UpperCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
UpperCAmelCase = dataset
UpperCAmelCase = RagRetriever(
lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def a__( self : Optional[int] , lowerCAmelCase : bool )-> int:
"""simple docstring"""
UpperCAmelCase = self.get_dummy_dataset()
UpperCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
UpperCAmelCase = os.path.join(self.tmpdirname , '''dataset''' )
UpperCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
UpperCAmelCase = RagRetriever(
lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
UpperCAmelCase = RagRetriever(
lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowerCAmelCase ) , )
return retriever
def a__( self : Dict )-> str:
"""simple docstring"""
UpperCAmelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
UpperCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
UpperCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(lowerCAmelCase , open(lowerCAmelCase , '''wb''' ) )
UpperCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
UpperCAmelCase = RagRetriever(
lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def a__( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = 1
UpperCAmelCase = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = retriever.retrieve(lowerCAmelCase , n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
UpperCAmelCase = self.get_dummy_dataset()
retriever.save_pretrained(lowerCAmelCase )
UpperCAmelCase = RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase = retriever.retrieve(lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = 1
UpperCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
UpperCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = retriever.retrieve(lowerCAmelCase , n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase )
UpperCAmelCase = RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase = retriever.retrieve(lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def a__( self : str )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 1
UpperCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
UpperCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = retriever.retrieve(lowerCAmelCase , n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase )
UpperCAmelCase = RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase = retriever.retrieve(lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def a__( self : Any )-> Tuple:
"""simple docstring"""
UpperCAmelCase = 1
UpperCAmelCase = self.get_dummy_legacy_index_retriever()
UpperCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = retriever.retrieve(lowerCAmelCase , n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase )
UpperCAmelCase = RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase = retriever.retrieve(lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def a__( self : Tuple )-> List[Any]:
"""simple docstring"""
import torch
UpperCAmelCase = 1
UpperCAmelCase = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase = [[5, 7], [10, 11]]
UpperCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase = retriever(lowerCAmelCase , lowerCAmelCase , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , np.ndarray )
UpperCAmelCase = retriever(
lowerCAmelCase , lowerCAmelCase , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase , return_tensors='''pt''' , )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def a__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
UpperCAmelCase = 1
UpperCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
retriever.set_ctx_encoder_tokenizer(lowerCAmelCase )
UpperCAmelCase = [[5, 7], [10, 11]]
UpperCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase = retriever(lowerCAmelCase , lowerCAmelCase , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase )
self.assertEqual(
len(lowerCAmelCase ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , lowerCAmelCase ) # check for doc token related keys in dictionary.
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """
    Given a column title as it appears in an Excel sheet, return its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZY")
    701
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
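# --- Editor's note (not part of the original file) ---
# Worked example: a title is read as a bijective base-26 numeral with
# A=1 ... Z=26, so "ZY" -> 25 * 26**0 + 26 * 26**1 = 25 + 676 = 701.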
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}


class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
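# --- Editor's usage sketch (not part of the original file) ---
# Composing a full BlipConfig from explicit sub-configs; note that __init__
# overwrites the text tower's encoder_hidden_size with the vision hidden_size:
#
#   text = BlipTextConfig(hidden_size=768)
#   vision = BlipVisionConfig(hidden_size=1024)
#   blip = BlipConfig.from_text_vision_configs(text, vision)
#   assert blip.text_config.encoder_hidden_size == 1024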
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]="relu" , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Union[str, Any]=None , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowerCAmelCase )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a__( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModel(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
__magic_name__ : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ : Optional[int] = False
__magic_name__ : List[str] = False
__magic_name__ : Dict = False
def a__( self : Union[str, Any] )-> None:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
return
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def a__( self : Any )-> List[str]:
"""simple docstring"""
pass
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> int:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ):
return model(pixel_values=lowerCAmelCase , **lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : Dict )-> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
'''simple docstring'''
import functools
def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    """
    Return the minimum cost of travel passes (1-day, 7-day, 30-day) needed to
    cover every travel day of the year.

    >>> minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15])
    11
    >>> minimum_tickets_cost([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15])
    17
    """
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
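# --- Editor's design note (not part of the original file) ---
# dynamic_programming(index) is the minimum cost to cover all travel days
# >= index, giving the recurrence
#     dp(i) = min(costs[0] + dp(i + 1), costs[1] + dp(i + 7), costs[2] + dp(i + 30))
# on travel days and dp(i) = dp(i + 1) otherwise; functools.cache bounds the
# recursion to at most 365 distinct states.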
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__( self : str )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__( self : Any , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__( self : List[Any] , lowerCAmelCase : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : int , lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''''''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.convert_tokens_to_ids(lowerCAmelCase )
UpperCAmelCase = tgt_lang_id
return inputs
def a__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : List[str] , )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__( self : List[Any] )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__( self : List[Any] , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[src_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
def a__( self : int , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[tgt_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
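# --- Editor's worked example (not part of the original file) ---
# With fairseq_offset = 1, SentencePiece id i maps to fairseq id i + 1, the
# four specials occupy ids 0-3, and the language codes form a contiguous block
# directly after the SentencePiece vocabulary:
#
#   lang_code_to_id["ar_AR"] == len(sp_model) + 0 + fairseq_offset
#   lang_code_to_id["cs_CZ"] == len(sp_model) + 1 + fairseq_offset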
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # up (row - 1)
    [0, -1],  # left (col - 1)
    [1, 0],  # down (row + 1)
    [0, 1],  # right (col + 1)
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we cannot expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y, x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowercase : Union[str, Any] = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
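# --- Editor's usage sketch (not part of the original file) ---
# A hedged example, assuming the optional joblib-spark integration is
# installed and a Spark session is active:
#
#   from datasets import load_dataset
#   with parallel_backend("spark"):
#       ds = load_dataset("imdb", split="train").map(lambda x: x, num_proc=4)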
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
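# --- Editor's usage sketch (not part of the original file) ---
# The multi-resolution approximation is steered by `approx_mode` ("full" or
# "sparse") and `block_per_row`:
#
#   config = MraConfig(approx_mode="sparse", block_per_row=2)
#   assert config.approx_mode == "sparse"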
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
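# --- Editor's note (not part of the original file) ---
# The _LazyModule defers the heavy imports, so for example:
#   from transformers import GPTBigCodeConfig   # always available
#   from transformers import GPTBigCodeModel    # resolved lazily; needs torch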
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
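# --- Editor's behaviour sketch (not part of the original file) ---
# Without the `note_seq` extra installed, any use of the dummy fails fast:
#
#   MidiProcessor()  # raises ImportError via requires_backends(..., ["note_seq"])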
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple=1 ):
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def lowerCamelCase__ ( A : int , A : Optional[Any]=0 ):
'''simple docstring'''
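    # Build an old-name -> new-name mapping for resnet parameters, translating
    # LDM names (in_layers / out_layers / emb_layers / skip_connection) into the
    # diffusers scheme (norm1 / conv1 / norm2 / conv2 / time_emb_proj / conv_shortcut).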
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item.replace('''in_layers.0''' , '''norm1''' )
UpperCAmelCase = new_item.replace('''in_layers.2''' , '''conv1''' )
UpperCAmelCase = new_item.replace('''out_layers.0''' , '''norm2''' )
UpperCAmelCase = new_item.replace('''out_layers.3''' , '''conv2''' )
UpperCAmelCase = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
UpperCAmelCase = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Any , A : int=0 ):
'''simple docstring'''
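    # Build an old-name -> new-name mapping for attention parameters,
    # translating LDM names (norm / proj_out) into the diffusers scheme
    # (group_norm / proj_attn).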
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item
UpperCAmelCase = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
UpperCAmelCase = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
UpperCAmelCase = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
UpperCAmelCase = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Tuple , A : Union[str, Any] , A : int , A : Dict=None , A : Optional[int]=None , A : Optional[Any]=None ):
'''simple docstring'''
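    # Copy tensors from the old checkpoint into the new state dict according
    # to the 'old' -> 'new' path mapping, splitting fused qkv tensors and
    # applying any additional renames along the way.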
assert isinstance(A , A ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase = old_checkpoint[path]
UpperCAmelCase = old_tensor.shape[0] // 3
UpperCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase = old_tensor.shape[0] // config['''num_head_channels'''] // 3
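            # Reshape to (num_heads, 3 * channels_per_head, ...) so the fused
            # qkv tensor can be split into per-head query / key / value chunks.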
UpperCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = old_tensor.split(channels // num_heads , dim=1 )
UpperCAmelCase = query.reshape(A )
UpperCAmelCase = key.reshape(A )
UpperCAmelCase = value.reshape(A )
for path in paths:
UpperCAmelCase = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
UpperCAmelCase = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
UpperCAmelCase = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase = old_checkpoint[path['''old''']][:, :, 0]
else:
UpperCAmelCase = old_checkpoint[path['''old''']]
def lowerCamelCase__ ( A : Union[str, Any] , A : Dict ):
'''simple docstring'''
UpperCAmelCase = {}
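    # The time embedding and the first/last convolutions map one-to-one, so
    # they are copied directly before handling the structured blocks.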
UpperCAmelCase = checkpoint['''time_embed.0.weight''']
UpperCAmelCase = checkpoint['''time_embed.0.bias''']
UpperCAmelCase = checkpoint['''time_embed.2.weight''']
UpperCAmelCase = checkpoint['''time_embed.2.bias''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase = checkpoint['''out.0.weight''']
UpperCAmelCase = checkpoint['''out.0.bias''']
UpperCAmelCase = checkpoint['''out.2.weight''']
UpperCAmelCase = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
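    # Remap every input block: resnets and attentions move into down_blocks,
    # and the strided ``op`` convolutions become downsamplers.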
for i in range(1 , A ):
UpperCAmelCase = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = (i - 1) % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path, resnet_op] , config=A )
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""input_blocks.{i}.1""",
'''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""input_blocks.{i}.1.qkv.bias""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=A , config=A , )
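    # The three middle blocks map onto mid_block as resnet / attention / resnet.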
UpperCAmelCase = middle_blocks[0]
UpperCAmelCase = middle_blocks[1]
UpperCAmelCase = middle_blocks[2]
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
A , A , A , attention_paths_to_split=A , config=A )
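    # Remap every output block: resnets and attentions move into up_blocks,
    # with any trailing convolution treated as the upsampler.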
for i in range(A ):
UpperCAmelCase = i // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = i % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [shave_segments(A , 2 ) for name in output_blocks[i]]
UpperCAmelCase = {}
for layer in output_block_layers:
UpperCAmelCase , UpperCAmelCase = layer.split('''.''' )[0], shave_segments(A , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(A )
else:
UpperCAmelCase = [layer_name]
if len(A ) > 1:
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(A , A , A , additional_replacements=[meta_path] , config=A )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(A ) == 2:
UpperCAmelCase = []
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""output_blocks.{i}.1""",
'''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""output_blocks.{i}.1.qkv.bias""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=A , )
else:
UpperCAmelCase = renew_resnet_paths(A , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase = '''.'''.join(['''output_blocks''', str(A ), path['''old''']] )
UpperCAmelCase = '''.'''.join(['''up_blocks''', str(A ), '''resnets''', str(A ), path['''new''']] )
UpperCAmelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowercase : Dict = parser.parse_args()
_lowercase : List[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowercase : List[str] = json.loads(f.read())
_lowercase : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowercase : Any = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowercase : Tuple = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 50
| 0
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( A : List[Any] , A : int , A : List[str] , A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = BigBirdConfig.from_json_file(A )
print(f"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
UpperCAmelCase = BigBirdForQuestionAnswering(A )
else:
UpperCAmelCase = BigBirdForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(A , A , is_trivia_qa=A )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(A )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
_lowercase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 706
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = "dpr"
def __init__( self : Dict , lowerCAmelCase : Any=30522 , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=512 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : str=1E-12 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Tuple="absolute" , lowerCAmelCase : int = 0 , **lowerCAmelCase : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = projection_dim
UpperCAmelCase = position_embedding_type
| 50
| 0
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase__:
def __init__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any]=13 , lowerCAmelCase : Tuple=7 , lowerCAmelCase : int=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : int=99 , lowerCAmelCase : Optional[Any]=32 , lowerCAmelCase : Optional[Any]=5 , lowerCAmelCase : Tuple=4 , lowerCAmelCase : Optional[Any]=37 , lowerCAmelCase : Any="gelu" , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=512 , lowerCAmelCase : Any=16 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : Optional[Any]=3 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Any=None , )-> Any:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , use_stable_embedding=lowerCAmelCase , )
def a__( self : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Any )-> int:
"""simple docstring"""
UpperCAmelCase = OpenLlamaModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = True
UpperCAmelCase = OpenLlamaModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , )
UpperCAmelCase = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , )
UpperCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : int , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = OpenLlamaForCausalLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Tuple , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = OpenLlamaForCausalLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# first forward pass
UpperCAmelCase = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase , )
UpperCAmelCase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the new mask to the attention mask
UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , output_hidden_states=lowerCAmelCase , )['''hidden_states'''][0]
UpperCAmelCase = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , output_hidden_states=lowerCAmelCase , )['''hidden_states'''][0]
# select random slice
UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def a__( self : Optional[int] )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : int = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__magic_name__ : int = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__magic_name__ : Tuple = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ : Dict = False
__magic_name__ : str = False
def a__( self : int )-> Any:
"""simple docstring"""
UpperCAmelCase = OpenLlamaModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__( self : Optional[int] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Dict )-> int:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase = type
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = input_dict['''input_ids''']
UpperCAmelCase = input_ids.ne(1 ).to(lowerCAmelCase )
UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase = OpenLlamaForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__( self : List[str] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = '''single_label_classification'''
UpperCAmelCase = input_dict['''input_ids''']
UpperCAmelCase = input_ids.ne(1 ).to(lowerCAmelCase )
UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase = OpenLlamaForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__( self : Tuple )-> str:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = '''multi_label_classification'''
UpperCAmelCase = input_dict['''input_ids''']
UpperCAmelCase = input_ids.ne(1 ).to(lowerCAmelCase )
UpperCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase = OpenLlamaForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def a__( self : Optional[int] )-> str:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a__( self : List[str] , lowerCAmelCase : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase = OpenLlamaModel(lowerCAmelCase )
original_model.to(lowerCAmelCase )
original_model.eval()
UpperCAmelCase = original_model(lowerCAmelCase ).last_hidden_state
UpperCAmelCase = original_model(lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase = {'''type''': scaling_type, '''factor''': 10.0}
UpperCAmelCase = OpenLlamaModel(lowerCAmelCase )
scaled_model.to(lowerCAmelCase )
scaled_model.eval()
UpperCAmelCase = scaled_model(lowerCAmelCase ).last_hidden_state
UpperCAmelCase = scaled_model(lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) )
| 707
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : Tuple = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
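    # Exactly one of the MLM / CLM objectives must be active, the
    # student/teacher pairing must be compatible, and all loss weights must be
    # non-negative with a strictly positive total.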
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCamelCase__ ( A : Any , A : str ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
elif args.student_type == "gpt2":
UpperCAmelCase = False
def lowerCamelCase__ ( A : List[Any] , A : List[str] ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=A , required=A , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
        '''--data_file''' , type=A , required=A , help='''The binarized file (tokenized + tokens_to_ids), grouped by sequence.''' , )
parser.add_argument(
        '''--student_type''' , type=A , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=A , help='''The student type (DistilBERT, RoBERTa, GPT-2).''' , )
parser.add_argument('''--student_config''' , type=A , required=A , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=A , type=A , help='''Load student initialization checkpoint.''' )
parser.add_argument(
        '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=A , help='''Teacher type (BERT, RoBERTa, GPT-2).''' )
parser.add_argument('''--teacher_name''' , type=A , required=A , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=A , help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=A , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=A , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=A , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=A , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=A , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=A , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=A , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=A , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=A , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
        '''--mlm_smoothing''' , default=0.7 , type=A , help='''Smoothing parameter to emphasize rarer tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=A , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
        '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if present. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=A , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=A , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=A , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=A , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=A , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=A , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=A , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=A , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=A , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=A , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=A , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=A , default=40_00 , help='''Checkpoint interval.''' )
UpperCAmelCase = parser.parse_args()
sanity_checks(A )
# ARGS #
init_gpu_params(A )
set_seed(A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(A ) , A , indent=4 )
git_log(args.dump_path )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.student_type]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase = {}
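    # Collect the id of every special token in the teacher tokenizer, keyed by
    # the token name, so the distillation code can look them up directly.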
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase = tokenizer.all_special_tokens.index(A )
UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase = special_tok_ids
UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
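        # Turn raw token counts into MLM sampling weights: the negative
        # exponent emphasizes rarer tokens, and special tokens get weight 0 so
        # they are never masked.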
UpperCAmelCase = np.maximum(A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase = 0.0 # do not predict special tokens
UpperCAmelCase = torch.from_numpy(A )
else:
UpperCAmelCase = None
UpperCAmelCase = LmSeqsDataset(params=A , data=A )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=A )
else:
UpperCAmelCase = student_model_class(A )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A , A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A , A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase = Distiller(
params=A , dataset=A , token_probs=A , student=A , teacher=A )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 50
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Tuple = DebertaTokenizer
__magic_name__ : str = True
__magic_name__ : Any = DebertaTokenizerFast
def a__( self : int )-> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
UpperCAmelCase = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase = {'''unk_token''': '''[UNK]'''}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase ) )
def a__( self : List[str] , **lowerCAmelCase : Dict )-> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def a__( self : Any , lowerCAmelCase : Any )-> Dict:
"""simple docstring"""
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = '''lower newer'''
return input_text, output_text
def a__( self : Any )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCAmelCase = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = tokens + [tokenizer.unk_token]
UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
def a__( self : Optional[Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = tokenizer('''Hello''' , '''World''' )
UpperCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , lowerCAmelCase )
@slow
def a__( self : List[str] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def a__( self : Tuple )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
UpperCAmelCase = tokenizer(lowerCAmelCase , padding=lowerCAmelCase )
UpperCAmelCase = [tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) for seq in encoding['''input_ids''']]
# fmt: off
UpperCAmelCase = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , lowerCAmelCase )
for expected, decoded in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 708
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
| 50
| 0
|
'''simple docstring'''
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
    # Round the float cube root: e.g. 27 ** (1 / 3) evaluates to
    # 3.0000000000000004, so an exact comparison without rounding would fail.
    UpperCAmelCase = round(n ** (1 / 3) )
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowercase : List[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 50
| 0
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_lowercase : List[str] = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowerCamelCase__ ( A : Dict , A : str ):
'''simple docstring'''
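    # True when ``source`` is within 1% of ``target``, in relative terms.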
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowerCamelCase__ ( A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = _TestCommandArgs(dataset=A , all_configs=A , save_infos=A )
UpperCAmelCase = TestCommand(*A )
test_command.run()
UpperCAmelCase = os.path.join(A , '''README.md''' )
assert os.path.exists(A )
UpperCAmelCase = DatasetInfosDict.from_directory(A )
UpperCAmelCase = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_35_15_63,
'''num_examples''': 1_00_00,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_84_18,
'''num_examples''': 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
UpperCAmelCase , UpperCAmelCase = getattr(dataset_infos['''default'''] , A ), getattr(expected_dataset_infos['''default'''] , A )
if key == "num_bytes":
assert is_apercent_close(A , A )
elif key == "splits":
assert list(A ) == list(A )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 710
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 16
_lowercase : Dict = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=A , default='''logs''' , help='''Location on where to store experiment tracking logs and relevant project information''' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
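# Illustrative launches for the tracking example above (the flags are the
# ones this script defines; the `accelerate launch` form assumes a
# configured environment, and `this_script.py` is a placeholder name):
#   python this_script.py --with_tracking --project_dir logs
#   accelerate launch this_script.py --mixed_precision fp16 --with_tracking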
| 50
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = 2
while True:
UpperCAmelCase = factor_map.pop(A , A )
if factor:
UpperCAmelCase = factor + prime
while x in factor_map:
x += factor
UpperCAmelCase = factor
else:
UpperCAmelCase = prime
yield prime
prime += 1
def lowerCamelCase__ ( A : float = 1E10 ):
'''simple docstring'''
UpperCAmelCase = sieve()
UpperCAmelCase = 1
while True:
UpperCAmelCase = next(A )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime as the remainder will be 2.
next(A )
n += 2
if __name__ == "__main__":
print(solution())
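# Illustrative sanity check (not part of the original script, and assuming
# the generator is exposed as `sieve`, the name `solution` above uses):
# the incremental sieve yields the primes in order, so its first five
# values should be 2, 3, 5, 7 and 11.
_gen = sieve()
assert [next(_gen) for _ in range(5)] == [2, 3, 5, 7, 11]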
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : int = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 712
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
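# Worked example with the default 24 kHz settings above (illustrative
# arithmetic, reusing the math/numpy imports at the top of this file):
# hop length = 8 * 5 * 4 * 2 = 320 samples, so the frame rate is
# ceil(24000 / 320) = 75 frames per second, and the last property gives
# int(1000 * 24.0 // (75 * 10)) = 32 quantizers.
assert math.ceil(24000 / int(np.prod([8, 5, 4, 2]))) == 75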
| 50
| 0
|
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase__ ( A : Optional[Any] , A : int ):
'''simple docstring'''
assert isinstance(A , A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCamelCase__ ( A : Any , A : Any , A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = tmp_path / '''cache'''
UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase = ParquetDatasetReader(A , cache_dir=A , keep_in_memory=A ).read()
_check_parquet_dataset(A , A )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCamelCase__ ( A : Optional[Any] , A : Optional[Any] , A : Tuple ):
'''simple docstring'''
UpperCAmelCase = tmp_path / '''cache'''
UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase = features.copy() if features else default_expected_features
UpperCAmelCase = (
Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase = ParquetDatasetReader(A , features=A , cache_dir=A ).read()
_check_parquet_dataset(A , A )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCamelCase__ ( A : str , A : str , A : int ):
'''simple docstring'''
UpperCAmelCase = tmp_path / '''cache'''
UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase = ParquetDatasetReader(A , cache_dir=A , split=A ).read()
_check_parquet_dataset(A , A )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCamelCase__ ( A : str , A : Optional[Any] , A : int ):
'''simple docstring'''
if issubclass(A , A ):
UpperCAmelCase = parquet_path
elif issubclass(A , A ):
UpperCAmelCase = [parquet_path]
UpperCAmelCase = tmp_path / '''cache'''
UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase = ParquetDatasetReader(A , cache_dir=A ).read()
_check_parquet_dataset(A , A )
def lowerCamelCase__ ( A : List[Any] , A : str , A : Dict=("train",) ):
'''simple docstring'''
assert isinstance(A , A )
for split in splits:
UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCamelCase__ ( A : str , A : Optional[int] , A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = tmp_path / '''cache'''
UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=A , keep_in_memory=A ).read()
_check_parquet_datasetdict(A , A )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCamelCase__ ( A : str , A : Any , A : int ):
'''simple docstring'''
UpperCAmelCase = tmp_path / '''cache'''
UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase = features.copy() if features else default_expected_features
UpperCAmelCase = (
Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase = ParquetDatasetReader({'''train''': parquet_path} , features=A , cache_dir=A ).read()
_check_parquet_datasetdict(A , A )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple , A : List[Any] ):
'''simple docstring'''
if split:
UpperCAmelCase = {split: parquet_path}
else:
UpperCAmelCase = '''train'''
UpperCAmelCase = {'''train''': parquet_path, '''test''': parquet_path}
UpperCAmelCase = tmp_path / '''cache'''
UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase = ParquetDatasetReader(A , cache_dir=A ).read()
_check_parquet_datasetdict(A , A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase__ ( A : Any , A : Dict ):
'''simple docstring'''
UpperCAmelCase = ParquetDatasetWriter(A , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
UpperCAmelCase = pq.ParquetFile(tmp_path / '''foo.parquet''' )
UpperCAmelCase = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase__ ( A : Dict , A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = str(shared_datadir / '''test_image_rgb.jpg''' )
UpperCAmelCase = {'''image''': [image_path]}
UpperCAmelCase = Features({'''image''': Image()} )
UpperCAmelCase = Dataset.from_dict(A , features=A )
UpperCAmelCase = ParquetDatasetWriter(A , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
UpperCAmelCase = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
UpperCAmelCase = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=A ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase__ ( A : str , A : int ):
'''simple docstring'''
assert get_writer_batch_size(A ) == expected
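# Minimal round-trip sketch of the reader/writer exercised above
# (illustrative; the path and data are placeholders):
#   ds = Dataset.from_dict({"col_1": ["a"], "col_2": [0], "col_3": [0.0]})
#   ParquetDatasetWriter(ds, "out.parquet").write()
#   reloaded = ParquetDatasetReader("out.parquet").read()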
| 713
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Any = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Any = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 714
|
'''simple docstring'''
import heapq
def lowerCamelCase__ ( A : dict ):
'''simple docstring'''
UpperCAmelCase = []
    # for each node and its adjacency list, push the node and its rank onto the queue
    # using the heapq module, the queue is filled like a priority queue
    # heapq implements a min-priority queue, so -1 * len(v) is used to order vertices by descending degree
for key, value in graph.items():
# O(log(n))
heapq.heappush(A , [-1 * len(A ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase = heapq.heappop(A )[1][0]
chosen_vertices.add(A )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if the vertex has no adjacent nodes left, skip
if elem[0] == 0:
continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
UpperCAmelCase = elem[1][1].index(A )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(A )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 50
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Tuple = 16
_lowercase : Tuple = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : Any ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Dict , A : int ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize accelerator
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# Now we train the model
for epoch in range(A ):
model.train()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(A ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
UpperCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
| 715
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowercase : Optional[int] = """examples/"""
_lowercase : str = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowercase : Dict = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowercase : List[Any] = """README.md"""
def lowerCamelCase__ ( A : int , A : str , A : Optional[Any] ):
'''simple docstring'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace('''VERSION''' , A )
UpperCAmelCase = re_pattern.sub(A , A )
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(A )
def lowerCamelCase__ ( A : Optional[int] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(A , A ) , A , pattern='''examples''' )
def lowerCamelCase__ ( A : str , A : Dict=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A , A , A )
if not patch:
update_version_in_examples(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(A ).groups()[0]
return packaging.version.parse(A )
def lowerCamelCase__ ( A : Tuple=False ):
'''simple docstring'''
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, check out a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(A ) == 0:
UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(A , patch=A )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = get_version()
UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(A ) == 0:
UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(A )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowercase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
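# Illustrative behaviour of the "init" pattern above (hand-derived from the
# regex, not an actual run): a source line
#     __version__ = "4.28.0.dev0"
# is rewritten by update_version_in_file(fname, "4.28.0", pattern="init") to
#     __version__ = "4.28.0"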
| 50
| 0
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Any = "bart"
__magic_name__ : Tuple = ["past_key_values"]
__magic_name__ : Dict = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[str] , lowerCAmelCase : Tuple=50265 , lowerCAmelCase : str=1024 , lowerCAmelCase : int=12 , lowerCAmelCase : Optional[Any]=4096 , lowerCAmelCase : List[str]=16 , lowerCAmelCase : Dict=12 , lowerCAmelCase : Optional[Any]=4096 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : List[str]="gelu" , lowerCAmelCase : Dict=1024 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : int=False , lowerCAmelCase : Any=True , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : int=1 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=2 , lowerCAmelCase : Any=2 , **lowerCAmelCase : Dict , )-> Tuple:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = d_model
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = classifier_dropout
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=lowerCAmelCase , pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , lowerCAmelCase ):
UpperCAmelCase = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
class UpperCamelCase__( lowerCAmelCase ):
@property
def a__( self : Tuple )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCAmelCase = {0: '''batch'''}
UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCAmelCase , UpperCAmelCase = self.num_layers
for i in range(lowerCAmelCase ):
UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
UpperCAmelCase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def a__( self : Any )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase = super().outputs
else:
UpperCAmelCase = super(lowerCAmelCase , self ).outputs
if self.use_past:
UpperCAmelCase , UpperCAmelCase = self.num_layers
for i in range(lowerCAmelCase ):
UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def a__( self : List[Any] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
UpperCAmelCase = seq_length if not self.use_past else 1
UpperCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase = dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
UpperCAmelCase = common_inputs['''decoder_input_ids'''].shape[1]
UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
UpperCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase = decoder_seq_length + 3
UpperCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
UpperCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase , UpperCAmelCase = self.num_layers
UpperCAmelCase = min(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
UpperCAmelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
UpperCAmelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
def a__( self : int , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase = seqlen + 2
UpperCAmelCase , UpperCAmelCase = self.num_layers
UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
UpperCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase = common_inputs['''attention_mask'''].dtype
UpperCAmelCase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
UpperCAmelCase = [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
def a__( self : Dict , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase = compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase = tokenizer.num_special_tokens_to_add(lowerCAmelCase )
UpperCAmelCase = compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase = dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
def a__( self : Optional[Any] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
UpperCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def a__( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> List[Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase = super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
UpperCAmelCase = super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
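# Shape cheat-sheet for the dummy past_key_values built above (illustrative,
# using the BART-large defaults d_model=1024 and 16 attention heads): each
# cached key/value tensor has shape (batch, 16, sequence_length, 1024 // 16 = 64).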
| 716
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 250
UpperCAmelCase = ids_tensor((batch_size, length) , lowerCAmelCase )
UpperCAmelCase = torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : int )-> Any:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
| 50
| 0
|
'''simple docstring'''
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[Any] = "philschmid/bart-large-cnn-samsum"
__magic_name__ : Any = (
"This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
"and returns a summary of the text."
)
__magic_name__ : Optional[int] = "summarizer"
__magic_name__ : Optional[Any] = AutoTokenizer
__magic_name__ : Dict = AutoModelForSeqaSeqLM
__magic_name__ : Any = ["text"]
__magic_name__ : str = ["text"]
def a__( self : Optional[int] , lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
return self.pre_processor(lowerCAmelCase , return_tensors='''pt''' , truncation=lowerCAmelCase )
def a__( self : str , lowerCAmelCase : List[Any] )-> Optional[Any]:
"""simple docstring"""
return self.model.generate(**lowerCAmelCase )[0]
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
return self.pre_processor.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
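# Hypothetical usage sketch (the class name above is obfuscated, so
# `SummarizerTool` below is a placeholder, not an actual export):
#   tool = SummarizerTool()
#   print(tool("Long English text to summarize ..."))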
| 717
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : List[str] = ["note_seq"]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 50
| 0
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 250
UpperCAmelCase = ids_tensor((batch_size, length) , lowerCAmelCase )
UpperCAmelCase = torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : int )-> Any:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
| 718
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    '''simple docstring'''
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
_lowercase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 50
| 0
|
'''simple docstring'''
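# Project Euler problem 12: find the first triangle number with more than five hundred divisors.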
def triangle_number_generator():
    for n in range(1, 1_00_00_00):
        yield n * (n + 1) // 2
def count_divisors(n: int) -> int:
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_00)
if __name__ == "__main__":
    print(solution())
| 719
|
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
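# Registry of packaged dataset builders: each builder's source is hashed so caches invalidate when the code changes.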
def _hash_python_lines(lines: List[str]) -> str:
    '''simple docstring'''
    filtered_lines = []
    for line in lines:
        line = re.sub(r'''#.*''', '''''', line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '''\n'''.join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''')
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 720
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
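# Implements the `download` CLI subcommand, which pre-fetches a model and its tokenizer into the local cache.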
def download_command_factory(args):
    '''simple docstring'''
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        download_parser = parser.add_parser('''download''')
        download_parser.add_argument(
            '''--cache-dir''', type=str, default=None, help='''Path to location to store the models''')
        download_parser.add_argument(
            '''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''')
        download_parser.add_argument(
            '''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine''')
        download_parser.add_argument('''model''', type=str, help='''Name of the model to download''')
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        """simple docstring"""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 50
| 0
|
'''simple docstring'''
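# Depth-first topological sort of a small DAG given as an adjacency list.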
edges = {"""a""": ["""c""", """b"""], """b""": ["""d""", """e"""], """c""": [], """d""": [], """e""": []}
vertices = ["""a""", """b""", """c""", """d""", """e"""]
def topological_sort(start, visited, sort):
    '''simple docstring'''
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("""a""", [], [])
    print(sort)
| 721
|
'''simple docstring'''
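# Convert an Excel-style column title such as "AB" into its 1-based column number (base 26, A = 1).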
def excel_column_to_number(column_title: str) -> int:
    '''simple docstring'''
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 50
| 0
|
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
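# Map each model shortcut to its config class, TF and PyTorch model classes, and pretrained archive map(s).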
lowerCamelCase__ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True):
    """simple docstring"""
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''')
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f'''Building TensorFlow model from configuration: {config}''')
    tf_model = model_class(config)
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models)
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network
        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f'''Max absolute difference between models outputs {diff}''')
        assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
    # Save the TensorFlow model weights
    print(f'''Save TensorFlow model to {tf_dump_path}''')
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False):
    """simple docstring"""
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f''' Converting model type {j}/{len(model_types )}: {model_type}''')
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''')
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f''' Skipping finetuned checkpoint {model_shortcut_name}''')
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''')
                continue
            print(
                f''' Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}''')
            print("-" * 100)
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"
            convert_pt_checkpoint_to_tf(
                model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"), compare_with_pt_model=compare_with_pt_model)
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowerCamelCase__ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 51
|
"""simple docstring"""
from datetime import datetime
import requests
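# Download a page's Open Graph preview image (the og:image meta tag) and save it under a timestamped name.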
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
| 51
| 1
|
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
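# Version helpers: compare an installed package's version (or a given Version) against a requirement string.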
torch_version = parse(importlib.metadata.version("torch"))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """simple docstring"""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
def is_torch_version(operation: str, version: str):
    """simple docstring"""
    return compare_versions(torch_version, operation, version)
| 51
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
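# Introspection helpers that map a model test file to its test classes, model classes, and model tester classes.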
def get_module_path(test_file):
    """simple docstring"""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f'''{test_file} instead.''')
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f'''`test_file` should be a python file. Got {test_fn} instead.''')
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''')
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """simple docstring"""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """simple docstring"""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """simple docstring"""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """simple docstring"""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """simple docstring"""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """simple docstring"""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """simple docstring"""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """simple docstring"""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """simple docstring"""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """simple docstring"""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """simple docstring"""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 51
| 1
|
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
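# Lazy import structure for Mask2Former, following the standard transformers __init__ pattern.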
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 51
| 1
|
"""simple docstring"""
def factorial(digit: int) -> int:
    """simple docstring"""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))
def krishnamurthy(number: int) -> bool:
    """simple docstring"""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number."""
    )
| 51
|
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 51
| 1
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
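# Tests for find_executable_batch_size: the decorator should halve the batch size after every (fake) CUDA OOM
# until the wrapped function succeeds.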
def raise_fake_out_of_memory():
"""simple docstring"""
raise RuntimeError("CUDA out of memory." )
class ModelForTest(nn.Module):
    '''simple docstring'''
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    '''simple docstring'''
    def test_memory_implicit(self):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1
        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
| 51
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
# Realm tok
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
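        # Wrap the dummy block records and the dummy tokenizer in a RealmRetriever for the tests below.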
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase : int = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase : List[Any] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase : int = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
| 51
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
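# A directed graph whose edge weights are transition probabilities, used to simulate a Markov chain.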
class MarkovChainGraphUndirectedUnweighted:
    '''simple docstring'''
    def __init__(self) -> None:
        self.connections = {}
    def add_node(self, node: str) -> None:
        self.connections[node] = {}
    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability
    def get_nodes(self) -> list[str]:
        return list(self.connections)
    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
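        # Build eos-terminated input ids, an LED config, and an inputs dict that includes a global attention mask.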
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
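# Fill in default attention masks and head masks for LED inputs when the caller does not provide them.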
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        # TODO: head masking is not yet implemented
pass
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)
lowerCamelCase__ = 1E-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
| 51
| 1
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
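# A minimal worked case of the counting above: a 3x3 outer square with a 1x1
# hole uses 3 * 3 - 1 * 1 = 8 tiles, so t = 8 is counted once for the pair
# (outer_width=3, hole_width=1).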
| 51
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
| 51
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCamelCase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ :List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
SCREAMING_SNAKE_CASE__ :Optional[int] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
SCREAMING_SNAKE_CASE__ :int = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
_UpperCamelCase : Tuple = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
_UpperCamelCase : str = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}] )
_UpperCamelCase : Union[str, Any] = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}] )
_UpperCamelCase : int = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(__a ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
_UpperCamelCase : Tuple = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}] )
# Legacy behavior
_UpperCamelCase : Any = text_classifier("This is great !" , return_all_scores=__a )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}] )
_UpperCamelCase : int = text_classifier("This is great !" , return_all_scores=__a )
self.assertEqual(
nested_simplify(__a ) , [[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}]] )
_UpperCamelCase : List[str] = text_classifier(["This is great !", "Something else"] , return_all_scores=__a )
self.assertEqual(
nested_simplify(__a ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
_UpperCamelCase : Optional[Any] = text_classifier(["This is great !", "Something else"] , return_all_scores=__a )
self.assertEqual(
nested_simplify(__a ) , [
{"label": "LABEL_0", "score": 0.5_04},
{"label": "LABEL_0", "score": 0.5_04},
] , )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
import torch
_UpperCamelCase : List[str] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
_UpperCamelCase : Tuple = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}] )
@require_tf
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
_UpperCamelCase : List[str] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
_UpperCamelCase : Union[str, Any] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}] )
@slow
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
_UpperCamelCase : str = pipeline("text-classification" )
_UpperCamelCase : List[Any] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 1.0}] )
_UpperCamelCase : Optional[Any] = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "NEGATIVE", "score": 1.0}] )
_UpperCamelCase : Any = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 0.9_88}] )
@slow
@require_tf
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
_UpperCamelCase : Tuple = pipeline("text-classification" , framework="tf" )
_UpperCamelCase : int = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 1.0}] )
_UpperCamelCase : str = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "NEGATIVE", "score": 1.0}] )
_UpperCamelCase : Tuple = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 0.9_88}] )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Any , __a : str , __a : Optional[Any] ) -> int:
_UpperCamelCase : Optional[int] = TextClassificationPipeline(model=__a , tokenizer=__a )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Tuple , __a : str ) -> Tuple:
_UpperCamelCase : List[Any] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_UpperCamelCase : List[str] = "HuggingFace is in"
_UpperCamelCase : Tuple = text_classifier(__a )
self.assertEqual(nested_simplify(__a ) , [{"label": ANY(__a ), "score": ANY(__a )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
_UpperCamelCase : Dict = ["HuggingFace is in ", "Paris is in France"]
_UpperCamelCase : List[Any] = text_classifier(__a )
self.assertEqual(
nested_simplify(__a ) , [{"label": ANY(__a ), "score": ANY(__a )}, {"label": ANY(__a ), "score": ANY(__a )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_UpperCamelCase : List[Any] = text_classifier(__a , top_k=__a )
_UpperCamelCase : Optional[Any] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__a ) , [[{"label": ANY(__a ), "score": ANY(__a )}] * N, [{"label": ANY(__a ), "score": ANY(__a )}] * N] , )
_UpperCamelCase : List[str] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
_UpperCamelCase : List[str] = text_classifier(__a )
self.assertEqual(
nested_simplify(__a ) , {"label": ANY(__a ), "score": ANY(__a )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used as a text pair, but the tokenizer + pipeline interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_UpperCamelCase : Optional[int] = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(__a ):
text_classifier(__a )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_UpperCamelCase : Optional[Any] = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(__a ) , [{"label": ANY(__a ), "score": ANY(__a )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 51
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
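# Note (an assumption based on the upstream transformers source): the three
# properties above correspond to the ONNX export's `inputs` mapping, its
# `atol_for_validation` (1e-4), and its `default_onnx_opset` (12).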
| 51
| 1
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowerCamelCase__ = pd.read_csv("sample_data.csv", header=None)
lowerCamelCase__ = df.shape[:1][0]
# If you're using some other dataset, select its target column here
lowerCamelCase__ = df.iloc[:, 1:2]
lowerCamelCase__ = actual_data.values.reshape(len_data, 1)
lowerCamelCase__ = MinMaxScaler().fit_transform(actual_data)
lowerCamelCase__ = 10
lowerCamelCase__ = 5
lowerCamelCase__ = 20
lowerCamelCase__ = len_data - periods * look_back
lowerCamelCase__ = actual_data[:division]
lowerCamelCase__ = actual_data[division - look_back :]
lowerCamelCase__ , lowerCamelCase__ = [], []
lowerCamelCase__ , lowerCamelCase__ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowerCamelCase__ = np.array(train_x)
lowerCamelCase__ = np.array(test_x)
lowerCamelCase__ = np.array([list(i.ravel()) for i in train_y])
lowerCamelCase__ = np.array([list(i.ravel()) for i in test_y])
lowerCamelCase__ = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
lowerCamelCase__ = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
lowerCamelCase__ = model.predict(x_test)
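# Caveat (illustrative note, not part of the original script): `result` is in
# the scaler's [0, 1] range. Recovering prices would require keeping a
# reference to the fitted MinMaxScaler and calling its `inverse_transform`;
# as written, the scaler instance is discarded on the `fit_transform` line.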
| 51
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__ ( lowercase_ ,lowercase_ ) -> str | None:
"""simple docstring"""
_UpperCamelCase : str = ""
_UpperCamelCase : int
_UpperCamelCase : int
_UpperCamelCase : int
for keychar, cipherchar in zip(cycle(lowercase_ ) ,lowercase_ ):
_UpperCamelCase : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase_ )
return decoded
def lowercase__ ( lowercase_ ) -> list[str]:
"""simple docstring"""
_UpperCamelCase : list[str] = []
for key in product(lowercase_ ,repeat=3 ):
_UpperCamelCase : int = try_key(lowercase_ ,lowercase_ )
if encoded is not None:
possibles.append(lowercase_ )
return possibles
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : str = Path(lowercase_ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
_UpperCamelCase : Optional[Any] = [int(lowercase_ ) for number in data.strip().split("," )]
_UpperCamelCase : List[str] = filter_valid_chars(lowercase_ )
for common_word in COMMON_WORDS:
_UpperCamelCase : Union[str, Any] = filter_common_word(lowercase_ ,lowercase_ )
if len(lowercase_ ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
return sum(ord(lowercase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
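# Small worked example for the XOR decoder above (values verified by hand):
# with key "abc" (ints [97, 98, 99]), the ciphertext [9, 11, 66] decodes to
# "hi!", since 9 ^ 97 = 104 = ord("h"), 11 ^ 98 = 105 = ord("i"), and
# 66 ^ 99 = 33 = ord("!").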
| 51
| 1
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def lowercase__ ( ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" ,type=lowercase_ ,default="biencoder-nq-dev.json" ,help="Path to raw DPR training data" ,)
parser.add_argument(
"--evaluation_set" ,type=lowercase_ ,help="where to store parsed evaluation_set file" ,)
parser.add_argument(
"--gold_data_path" ,type=lowercase_ ,help="where to store parsed gold_data_path file" ,)
_UpperCamelCase : Union[str, Any] = parser.parse_args()
with open(args.src_path ,"r" ) as src_file, open(args.evaluation_set ,"w" ) as eval_file, open(
args.gold_data_path ,"w" ) as gold_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
for dpr_record in tqdm(lowercase_ ):
_UpperCamelCase : str = dpr_record["question"]
_UpperCamelCase : Dict = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(lowercase_ ) + "\n" )
if __name__ == "__main__":
main()
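# Example invocation (script and output file names are illustrative):
# python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#     --evaluation_set eval_set.txt --gold_data_path gold_data.txt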
| 51
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = len(lowercase_ )
print("The following activities are selected:" )
# The first activity is always selected
_UpperCamelCase : List[Any] = 0
print(lowercase_ ,end="," )
# Consider rest of the activities
for j in range(lowercase_ ):
# If this activity starts at or after the finish
# time of the previously selected activity,
# then select it
if start[j] >= finish[i]:
print(lowercase_ ,end="," )
_UpperCamelCase : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
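# With the sample data above, the greedy scan selects activities 0, 1, 3 and 4,
# so the script prints:
# The following activities are selected:
# 0,1,3,4,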
| 51
| 1
|
"""simple docstring"""
import os
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
with open(os.path.dirname(lowercase_ ) + "/p022_names.txt" ) as file:
_UpperCamelCase : Optional[Any] = str(file.readlines()[0] )
_UpperCamelCase : Any = names.replace("\"" ,"" ).split("," )
names.sort()
_UpperCamelCase : List[str] = 0
_UpperCamelCase : Union[str, Any] = 0
for i, name in enumerate(lowercase_ ):
for letter in name:
name_score += ord(lowercase_ ) - 64
total_score += (i + 1) * name_score
_UpperCamelCase : List[Any] = 0
return total_score
if __name__ == "__main__":
print(solution())
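# Worked example from the problem statement: COLIN has letter value
# 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list,
# contributing 938 * 53 = 49714 to the total.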
| 51
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
| 51
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = MobileBertConfig.from_json_file(lowercase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_UpperCamelCase : Tuple = MobileBertForPreTraining(lowercase_ )
# Load weights from tf checkpoint
_UpperCamelCase : Dict = load_tf_weights_in_mobilebert(lowercase_ ,lowercase_ ,lowercase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() ,lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
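# Example invocation (paths are illustrative):
# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/model.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin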
| 51
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
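# Illustrative sketch (column names are hypothetical): for a dataset whose
# columns are "article" and "highlights", this template would be instantiated
# with text_column="article" and summary_column="highlights", and the mapping
# above tells `Dataset.prepare_for_task` how to rename them to "text" and
# "summary".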
| 51
| 1
|
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowercase__ ( lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : int = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
_UpperCamelCase : Any = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw ).convert("RGB" )
_UpperCamelCase : Dict = transforms.Compose(
[
transforms.Resize((image_size, image_size) ,interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) ,(0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
_UpperCamelCase : int = transform(lowercase_ ).unsqueeze(0 ).to(lowercase_ )
return image
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if "visual_encoder" in key:
_UpperCamelCase : List[Any] = re.sub("visual_encoder*" ,"vision_model.encoder" ,lowercase_ )
if "blocks" in key:
_UpperCamelCase : str = re.sub(r"blocks" ,"layers" ,lowercase_ )
if "attn" in key:
_UpperCamelCase : Union[str, Any] = re.sub(r"attn" ,"self_attn" ,lowercase_ )
if "norm1" in key:
_UpperCamelCase : int = re.sub(r"norm1" ,"layer_norm1" ,lowercase_ )
if "norm2" in key:
_UpperCamelCase : Optional[int] = re.sub(r"norm2" ,"layer_norm2" ,lowercase_ )
if "encoder.norm" in key:
_UpperCamelCase : Tuple = re.sub(r"encoder.norm" ,"post_layernorm" ,lowercase_ )
if "encoder.patch_embed.proj" in key:
_UpperCamelCase : Dict = re.sub(r"encoder.patch_embed.proj" ,"embeddings.patch_embedding" ,lowercase_ )
if "encoder.pos_embed" in key:
_UpperCamelCase : List[Any] = re.sub(r"encoder.pos_embed" ,"embeddings.position_embedding" ,lowercase_ )
if "encoder.cls_token" in key:
_UpperCamelCase : int = re.sub(r"encoder.cls_token" ,"embeddings.class_embedding" ,lowercase_ )
if "self_attn" in key:
_UpperCamelCase : Optional[int] = re.sub(r"self_attn.proj" ,"self_attn.projection" ,lowercase_ )
return key
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ) -> Tuple:
"""simple docstring"""
if config_path is not None:
_UpperCamelCase : Dict = BlipConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : List[str] = BlipConfig(projection_dim=512 ,text_config={} ,vision_config={} )
_UpperCamelCase : Optional[int] = BlipForConditionalGeneration(lowercase_ ).eval()
_UpperCamelCase : Dict = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
_UpperCamelCase : Union[str, Any] = blip_decoder(pretrained=lowercase_ ,image_size=384 ,vit="base" )
_UpperCamelCase : Union[str, Any] = pt_model.eval()
_UpperCamelCase : Optional[Any] = pt_model.state_dict()
for key in modified_state_dict.copy():
_UpperCamelCase : List[Any] = modified_state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_key(lowercase_ )
_UpperCamelCase : Any = value
hf_model.load_state_dict(lowercase_ )
_UpperCamelCase : Optional[int] = 384
_UpperCamelCase : int = load_demo_image(image_size=lowercase_ ,device="cpu" )
_UpperCamelCase : List[Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
_UpperCamelCase : Union[str, Any] = tokenizer(["a picture of"] ).input_ids
_UpperCamelCase : Optional[int] = hf_model.generate(lowercase_ ,lowercase_ )
assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
_UpperCamelCase : List[str] = hf_model.generate(lowercase_ )
assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_UpperCamelCase : List[Any] = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
_UpperCamelCase : List[str] = blip_vqa(pretrained=lowercase_ ,image_size=lowercase_ ,vit="base" )
vqa_model.eval()
_UpperCamelCase : List[str] = vqa_model.state_dict()
for key in modified_state_dict.copy():
_UpperCamelCase : Any = modified_state_dict.pop(lowercase_ )
_UpperCamelCase : Any = rename_key(lowercase_ )
_UpperCamelCase : Union[str, Any] = value
_UpperCamelCase : List[Any] = BlipForQuestionAnswering(lowercase_ )
hf_vqa_model.load_state_dict(lowercase_ )
_UpperCamelCase : Any = ["How many dogs are in this image?"]
_UpperCamelCase : int = tokenizer(lowercase_ ,return_tensors="pt" ).input_ids
_UpperCamelCase : Tuple = hf_vqa_model.generate(lowercase_ ,lowercase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
_UpperCamelCase : List[Any] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
_UpperCamelCase : Optional[Any] = blip_itm(pretrained=lowercase_ ,image_size=lowercase_ ,vit="base" )
itm_model.eval()
_UpperCamelCase : str = itm_model.state_dict()
for key in modified_state_dict.copy():
_UpperCamelCase : Optional[Any] = modified_state_dict.pop(lowercase_ )
_UpperCamelCase : Tuple = rename_key(lowercase_ )
_UpperCamelCase : List[str] = value
_UpperCamelCase : Union[str, Any] = BlipForImageTextRetrieval(lowercase_ )
_UpperCamelCase : Dict = ["A picture of a woman with a dog sitting in a beach"]
_UpperCamelCase : List[Any] = tokenizer(
lowercase_ ,return_tensors="pt" ,padding="max_length" ,truncation=lowercase_ ,max_length=35 ,).input_ids
hf_itm_model.load_state_dict(lowercase_ )
hf_itm_model.eval()
_UpperCamelCase : Optional[Any] = hf_itm_model(lowercase_ ,lowercase_ ,use_itm_head=lowercase_ )
_UpperCamelCase : Any = hf_itm_model(lowercase_ ,lowercase_ ,use_itm_head=lowercase_ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] ,dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowerCamelCase__ = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
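# Example invocation (paths are illustrative):
# python convert_blip_original_pytorch_to_hf.py \
#     --pytorch_dump_folder_path ./blip_converted --config_path ./config.json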
| 51
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = set()
# edges = set of the graph's edges
_UpperCamelCase : Union[str, Any] = get_edges(lowercase_ )
# While there are still edges left, take an arbitrary edge
# (from_node, to_node), add both of its endpoints to chosen_vertices,
# and then remove all edges adjacent to from_node and to_node
while edges:
_UpperCamelCase, _UpperCamelCase : str = edges.pop()
chosen_vertices.add(lowercase_ )
chosen_vertices.add(lowercase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase_ )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : List[str] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
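# Note: because set.pop() returns an arbitrary element, the returned cover can
# differ between runs. It is always a valid vertex cover (every edge has at
# least one endpoint in it) but not necessarily a minimum one; this
# matching-based heuristic is the classic 2-approximation.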
| 51
| 1
|
"""simple docstring"""
def lowercase__ ( ) -> list[list[int]]:
"""simple docstring"""
return [list(range(1_000 - i ,-1_000 - i ,-1 ) ) for i in range(1_000 )]
lowerCamelCase__ = generate_large_matrix()
lowerCamelCase__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowercase__ ( lowercase_ ) -> None:
"""simple docstring"""
assert all(row == sorted(lowercase_ ,reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ ,reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = 0
_UpperCamelCase : str = len(lowercase_ ) - 1
# Edge cases: an empty row, or a row whose values are all negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_UpperCamelCase : Dict = (left + right) // 2
_UpperCamelCase : str = array[mid]
# num is negative and the previous element is non-negative, so mid is the first negative index.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_UpperCamelCase : Optional[Any] = mid + 1
else:
_UpperCamelCase : List[Any] = mid - 1
# No negative numbers, so return the length of the array (one past the last index).
return len(lowercase_ )
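# Worked example for the binary search above: on the row [4, 3, 2, -1] it
# returns 3, the index of the first negative value (mid moves 1 -> 2 -> 3).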
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = 0
_UpperCamelCase : List[str] = len(grid[0] )
for i in range(len(lowercase_ ) ):
_UpperCamelCase : List[str] = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def lowercase__ ( ) -> None:
"""simple docstring"""
from timeit import timeit
print("Running benchmarks" )
_UpperCamelCase : List[str] = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_UpperCamelCase : List[str] = timeit(F'''{func}(grid=grid)''' ,setup=lowercase_ ,number=500 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 1
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : list[int] ) -> None:
_UpperCamelCase : Tuple = len(__a )
_UpperCamelCase : Dict = [0] * len_array
if len_array > 0:
_UpperCamelCase : Optional[Any] = array[0]
for i in range(1 , __a ):
_UpperCamelCase : Tuple = self.prefix_sum[i - 1] + array[i]
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : int , __a : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int ) -> bool:
_UpperCamelCase : int = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__a )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
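A usage sketch under assumed names: the class above is a prefix-sum structure, so PrefixSum, get_sum and contains_sum below are hypothetical readable names for the obfuscated identifiers, not the originals.

# Hedged demo; class and method names are assumptions.
ps = PrefixSum([1, 2, 3, 4])      # internal prefix sums: [1, 3, 6, 10]
assert ps.get_sum(1, 3) == 9      # 2 + 3 + 4
assert ps.contains_sum(6)         # the subarray [1, 2, 3] sums to 6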
| 51
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
    _UpperCamelCase : defaultdict = defaultdict(int )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
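For intuition, the arithmetic the sieve above relies on: a square lamina with outer side o and a centred square hole of side h (same parity as o) uses o*o - h*h tiles, and the solution counts tile totals up to the limit that are achievable by between 1 and 10 distinct laminae.

# Worked check of the lamina tile counts used by the sieve above.
assert 3 * 3 - 1 * 1 == 8     # the smallest lamina uses 8 tiles
assert 6 * 6 - 4 * 4 == 20    # e.g. the (o=6, h=4) lamina uses 20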
| 51
| 1
|
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( SchedulerCommonTest ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = (UnCLIPScheduler,)
def __SCREAMING_SNAKE_CASE ( self : str , **__a : Tuple ) -> Tuple:
_UpperCamelCase : Union[str, Any] = {
"num_train_timesteps": 1000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**__a )
return config
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = self.scheduler_classes[0]
_UpperCamelCase : Union[str, Any] = self.get_scheduler_config(variance_type="fixed_small_log" )
        _UpperCamelCase : int = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1e-5
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
_UpperCamelCase : Optional[Any] = self.scheduler_classes[0]
_UpperCamelCase : List[str] = self.get_scheduler_config(variance_type="learned_range" )
        _UpperCamelCase : Dict = scheduler_class(**scheduler_config )
_UpperCamelCase : Any = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_71_27_90 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7_99_80_52 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0_01_00_11 < 1e-5
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
_UpperCamelCase : List[Any] = self.scheduler_classes[0]
_UpperCamelCase : Union[str, Any] = self.get_scheduler_config()
        _UpperCamelCase : Optional[Any] = scheduler_class(**scheduler_config )
_UpperCamelCase : List[Any] = scheduler.timesteps
_UpperCamelCase : str = self.dummy_model()
_UpperCamelCase : List[Any] = self.dummy_sample_deter
_UpperCamelCase : List[str] = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
# 1. predict noise residual
            _UpperCamelCase : int = model(sample , t )
# 2. predict previous mean of sample x_t-1
            _UpperCamelCase : Union[str, Any] = scheduler.step(residual , t , sample , generator=generator ).prev_sample
_UpperCamelCase : Dict = pred_prev_sample
        _UpperCamelCase : Optional[int] = torch.sum(torch.abs(sample ) )
        _UpperCamelCase : Dict = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1e-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
_UpperCamelCase : Optional[int] = self.scheduler_classes[0]
_UpperCamelCase : Union[str, Any] = self.get_scheduler_config()
        _UpperCamelCase : Any = scheduler_class(**scheduler_config )
scheduler.set_timesteps(25 )
_UpperCamelCase : Tuple = scheduler.timesteps
_UpperCamelCase : Optional[Any] = self.dummy_model()
_UpperCamelCase : int = self.dummy_sample_deter
_UpperCamelCase : Optional[Any] = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
# 1. predict noise residual
            _UpperCamelCase : List[str] = model(sample , t )
if i + 1 == timesteps.shape[0]:
_UpperCamelCase : List[Any] = None
else:
_UpperCamelCase : int = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_UpperCamelCase : List[str] = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
_UpperCamelCase : Union[str, Any] = pred_prev_sample
        _UpperCamelCase : List[Any] = torch.sum(torch.abs(sample ) )
        _UpperCamelCase : Dict = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1e-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
pass
| 51
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=True , slots=True )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
        super().__init__(None , None )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
            _UpperCamelCase : Tuple = self._get_next_ind(ind )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
            if self._try_set(ind , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
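A minimal usage sketch of the open-addressing map above with lazy deletion. HashMap is a hypothetical readable name for the obfuscated class; the MutableMapping base supplies the membership test.

# Hedged demo; the class name is an assumption.
hm = HashMap()
hm["a"] = 1
hm["b"] = 2
del hm["a"]
assert "a" not in hm
assert hm["b"] == 2 and len(hm) == 1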
| 51
| 1
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ :Optional[str] = field(
        default=None , metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        } , )
    SCREAMING_SNAKE_CASE__ :Optional[str] = field(
        default=None , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES )} , )
    SCREAMING_SNAKE_CASE__ :Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    SCREAMING_SNAKE_CASE__ :Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    SCREAMING_SNAKE_CASE__ :Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ :Optional[str] = field(
        default=None , metadata={"help": "The input training data file (a text file)."} )
    SCREAMING_SNAKE_CASE__ :Optional[str] = field(
        default=None , metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        } , )
    SCREAMING_SNAKE_CASE__ :Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    SCREAMING_SNAKE_CASE__ :Optional[str] = field(
        default=None , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
    SCREAMING_SNAKE_CASE__ :Optional[str] = field(
        default=None , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
    SCREAMING_SNAKE_CASE__ :bool = field(
        default=False , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
    SCREAMING_SNAKE_CASE__ :bool = field(
        default=False , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
    SCREAMING_SNAKE_CASE__ :bool = field(default=False , metadata={"help": "Whether or not to use whole word mask."} )
SCREAMING_SNAKE_CASE__ :float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
SCREAMING_SNAKE_CASE__ :float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
SCREAMING_SNAKE_CASE__ :int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
SCREAMING_SNAKE_CASE__ :int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
    SCREAMING_SNAKE_CASE__ :bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = False ,lowercase_ = None ,) -> List[str]:
"""simple docstring"""
def _dataset(lowercase_ ,lowercase_=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=lowercase_ ,file_path=lowercase_ ,block_size=args.block_size ,ref_path=lowercase_ ,)
return LineByLineTextDataset(tokenizer=lowercase_ ,file_path=lowercase_ ,block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowercase_ ,file_path=lowercase_ ,block_size=args.block_size ,overwrite_cache=args.overwrite_cache ,cache_dir=lowercase_ ,)
if evaluate:
return _dataset(args.eval_data_file ,args.eval_ref_file )
elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file ,args.train_ref_file )
def lowercase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" ,lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(model_args.config_name ,cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir )
else:
_UpperCamelCase : Dict = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
_UpperCamelCase : int = AutoTokenizer.from_pretrained(model_args.tokenizer_name ,cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
_UpperCamelCase : List[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=lowercase_ ,cache_dir=model_args.cache_dir ,)
else:
logger.info("Training new model from scratch" )
        _UpperCamelCase : List[Any] = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
_UpperCamelCase : Dict = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_UpperCamelCase : Union[str, Any] = min(data_args.block_size ,tokenizer.max_len )
# Get datasets
_UpperCamelCase : int = (
get_dataset(lowercase_ ,tokenizer=lowercase_ ,cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_UpperCamelCase : int = (
get_dataset(lowercase_ ,tokenizer=lowercase_ ,evaluate=lowercase_ ,cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_UpperCamelCase : Dict = DataCollatorForPermutationLanguageModeling(
tokenizer=lowercase_ ,plm_probability=data_args.plm_probability ,max_span_length=data_args.max_span_length ,)
else:
if data_args.mlm and data_args.whole_word_mask:
_UpperCamelCase : Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=lowercase_ ,mlm_probability=data_args.mlm_probability )
else:
_UpperCamelCase : Union[str, Any] = DataCollatorForLanguageModeling(
tokenizer=lowercase_ ,mlm=data_args.mlm ,mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCamelCase : str = Trainer(
model=lowercase_ ,args=lowercase_ ,data_collator=lowercase_ ,train_dataset=lowercase_ ,eval_dataset=lowercase_ ,prediction_loss_only=lowercase_ ,)
# Training
if training_args.do_train:
_UpperCamelCase : Tuple = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowercase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase : int = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCamelCase : Union[str, Any] = trainer.evaluate()
_UpperCamelCase : Dict = math.exp(eval_output["eval_loss"] )
_UpperCamelCase : int = {"perplexity": perplexity}
_UpperCamelCase : int = os.path.join(training_args.output_dir ,"eval_results_lm.txt" )
if trainer.is_world_master():
with open(lowercase_ ,"w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" ,lowercase_ ,str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
        results.update(result )
return results
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 51
|
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> None:
_UpperCamelCase : List[Any] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_UpperCamelCase : List[str] = Vector()
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
_UpperCamelCase : str = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__a ) , "(0,0,0,0,0,1)" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
_UpperCamelCase : List[str] = Vector([1, 2, 3, 4] )
self.assertEqual(len(__a ) , 4 )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> None:
_UpperCamelCase : List[Any] = Vector([1, 2] )
_UpperCamelCase : Dict = Vector([1, 2, 3, 4, 5] )
_UpperCamelCase : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_UpperCamelCase : Tuple = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> None:
_UpperCamelCase : int = Vector([1, 2, 3] )
_UpperCamelCase : Any = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> None:
_UpperCamelCase : Dict = Vector([1, 2, 3] )
_UpperCamelCase : List[Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
_UpperCamelCase : Dict = Vector([1, 2, 3] )
_UpperCamelCase : Union[str, Any] = Vector([2, -1, 4] ) # for test of dot product
_UpperCamelCase : Any = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> None:
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> None:
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> None:
_UpperCamelCase : int = Vector([1, 2, 3] )
_UpperCamelCase : Any = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __a , __a ) ) , "(3,4,7)" )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> None:
_UpperCamelCase : str = Vector([1, 0, 0, 0, 0, 0] )
_UpperCamelCase : Any = x.copy()
self.assertEqual(str(__a ) , str(__a ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> None:
_UpperCamelCase : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__a ) , "(0,1,0)" )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> None:
_UpperCamelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> None:
_UpperCamelCase : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCamelCase : Union[str, Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
_UpperCamelCase : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCamelCase : Tuple = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> None:
_UpperCamelCase : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> None:
_UpperCamelCase : int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_UpperCamelCase : Dict = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
_UpperCamelCase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(__a ) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> None:
_UpperCamelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> None:
_UpperCamelCase : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCamelCase : Tuple = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> None:
_UpperCamelCase : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCamelCase : str = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
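A standalone sketch mirroring the assertions above. Vector, Matrix and axpy are the real names imported from .lib, so only the concrete values are illustrative:

v = Vector([1, 2, 3])
w = Vector([1, 0, 1])
assert str(axpy(2, v, w)) == "(3,4,7)"              # 2*v + w
m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
assert str(m * v) == "(14,32,50)"                   # matrix-vector product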
| 51
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( lowercase_ ,lowercase_=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
    _UpperCamelCase : Dict = requests.get(url ,headers=headers ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
        _UpperCamelCase : int = get_artifacts_links(worflow_run_id=workflow_run_id ,token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCamelCase : Dict = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name ,artifact_url=artifact_url ,output_dir=lowercase_ ,token=lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
get_last_daily_ci_artifacts(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = {}
for artifact_name in artifact_names:
_UpperCamelCase : Union[str, Any] = os.path.join(lowercase_ ,F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
                    if not os.path.isdir(filename ):
# read the file
                        with z.open(filename ) as f:
_UpperCamelCase : int = f.read().decode("UTF-8" )
return results
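A hypothetical invocation sketch. The top-level helper above is assumed to take (artifact_names, output_dir, token) in that order, inferred from its internal call chain, and get_last_daily_ci_reports is an assumed name for it; the artifact name and token handling are purely illustrative.

import os
reports = get_last_daily_ci_reports(           # assumed name and parameter order
    ["ci_results"],                            # assumed artifact name(s)
    "daily_ci",                                # local output directory
    os.environ.get("GITHUB_TOKEN"),            # token with read access to the repo
)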
| 51
| 1
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( *__a : List[Any] , **__a : Optional[int] ) -> Dict:
pass
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
    _UpperCamelCase : int = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def lowercase__ ( lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : str = np.array(lowercase_ )
_UpperCamelCase : Any = npimg.shape
return {"hash": hashimage(lowercase_ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
SCREAMING_SNAKE_CASE__ :Optional[Any] = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int , __a : Optional[int] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = MaskGenerationPipeline(model=__a , image_processor=__a )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Optional[int] , __a : Dict ) -> Union[str, Any]:
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
@slow
@require_torch
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Tuple = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
_UpperCamelCase : int = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
# Shortening by hashing
_UpperCamelCase : Union[str, Any] = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__a ), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
_UpperCamelCase : str = "facebook/sam-vit-huge"
_UpperCamelCase : str = pipeline("mask-generation" , model=__a )
_UpperCamelCase : List[Any] = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
_UpperCamelCase : Optional[Any] = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__a ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
| 51
|
"""simple docstring"""
import math
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any , __a : list[list[float]] , __a : list[int] ) -> int:
_UpperCamelCase : List[Any] = 0.0
_UpperCamelCase : Union[str, Any] = 0.0
        for i in range(len(__a ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ) -> list[list[int | float]]:
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[int] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCamelCase : List[str] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCamelCase : List[Any] = SelfOrganizingMap()
_UpperCamelCase : int = 3
_UpperCamelCase : List[Any] = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
# training sample
_UpperCamelCase : int = training_samples[j]
# Compute the winning vector
            _UpperCamelCase : Tuple = self_organizing_map.get_winner(weights ,sample )
# Update the winning vector
            _UpperCamelCase : int = self_organizing_map.update(weights ,sample ,winner ,alpha )
# classify test sample
_UpperCamelCase : Optional[int] = [0, 0, 0, 1]
    _UpperCamelCase : Dict = self_organizing_map.get_winner(weights ,sample )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
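A worked check of the winner rule above, using the initial weights from main(): for training sample [1, 1, 0, 0], the squared distance to weight row 0 is 0.64 + 0.16 + 0.25 + 0.81 = 1.86 and to row 1 is 0.04 + 0.36 + 0.49 + 0.09 = 0.98, so get_winner returns 1 because row 1 is closer.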
| 51
| 1
|
"""simple docstring"""
from collections import deque
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] , __a : str , __a : int , __a : int ) -> None:
_UpperCamelCase : List[str] = process_name # process name
_UpperCamelCase : Union[str, Any] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
_UpperCamelCase : List[str] = arrival_time
_UpperCamelCase : Dict = burst_time # remaining burst time
_UpperCamelCase : int = 0 # total time of the process wait in ready queue
_UpperCamelCase : str = 0 # time from arrival time to completion time
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __a : int , __a : list[int] , __a : deque[Process] , __a : int , ) -> None:
# total number of mlfq's queues
_UpperCamelCase : Union[str, Any] = number_of_queues
# time slice of queues that round robin algorithm applied
_UpperCamelCase : int = time_slices
# unfinished process is in this ready_queue
_UpperCamelCase : Any = queue
# current time
_UpperCamelCase : Union[str, Any] = current_time
# finished process is in this sequence queue
_UpperCamelCase : deque[Process] = deque()
def __SCREAMING_SNAKE_CASE ( self : Any ) -> list[str]:
_UpperCamelCase : int = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __SCREAMING_SNAKE_CASE ( self : str , __a : list[Process] ) -> list[int]:
_UpperCamelCase : int = []
for i in range(len(__a ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : list[Process] ) -> list[int]:
_UpperCamelCase : str = []
for i in range(len(__a ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : list[Process] ) -> list[int]:
_UpperCamelCase : Tuple = []
for i in range(len(__a ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : deque[Process] ) -> list[int]:
return [q.burst_time for q in queue]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Process ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __SCREAMING_SNAKE_CASE ( self : Any , __a : deque[Process] ) -> deque[Process]:
_UpperCamelCase : deque[Process] = deque() # sequence deque of finished process
while len(__a ) != 0:
_UpperCamelCase : Tuple = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
            self.update_waiting_time(cp )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
_UpperCamelCase : List[str] = 0
# set the process's turnaround time because it is finished
_UpperCamelCase : Union[str, Any] = self.current_time - cp.arrival_time
# set the completion time
_UpperCamelCase : Optional[int] = self.current_time
# add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __SCREAMING_SNAKE_CASE ( self : str , __a : deque[Process] , __a : int ) -> tuple[deque[Process], deque[Process]]:
_UpperCamelCase : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__a ) ):
_UpperCamelCase : Optional[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
            self.update_waiting_time(cp )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
_UpperCamelCase : Dict = self.current_time
# locate the process behind the queue because it is not finished
                ready_queue.append(cp )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
_UpperCamelCase : str = 0
# set the finish time
_UpperCamelCase : int = self.current_time
# update the process' turnaround time because it is finished
_UpperCamelCase : Any = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> deque[Process]:
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
_UpperCamelCase, _UpperCamelCase : Any = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCamelCase__ = Process("P1", 0, 53)
lowerCamelCase__ = Process("P2", 0, 17)
lowerCamelCase__ = Process("P3", 0, 68)
lowerCamelCase__ = Process("P4", 0, 24)
lowerCamelCase__ = 3
lowerCamelCase__ = [17, 25]
lowerCamelCase__ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCamelCase__ = Process("P1", 0, 53)
lowerCamelCase__ = Process("P2", 0, 17)
lowerCamelCase__ = Process("P3", 0, 68)
lowerCamelCase__ = Process("P4", 0, 24)
lowerCamelCase__ = 3
lowerCamelCase__ = [17, 25]
lowerCamelCase__ = deque([Pa, Pa, Pa, Pa])
lowerCamelCase__ = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCamelCase__ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 51
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            _UpperCamelCase : List[Any] = tf_models
            _UpperCamelCase : Dict = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            _UpperCamelCase : Dict = flax_models
            _UpperCamelCase : Union[str, Any] = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            _UpperCamelCase : Optional[int] = pt_models
            _UpperCamelCase : Any = _re_pt_models.match(attr_name ).groups()[0]
if lookup_dict is not None:
            while len(attr_name ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    _UpperCamelCase : Union[str, Any] = [len(c ) + 2 for c in columns]
    _UpperCamelCase : Any = max([len(name ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
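Worked examples of the two helpers above; camel_case_split and _center_text are the names actually referenced inside the table builder, even though their defs are obfuscated:

assert camel_case_split("CamelCaseXYZModel") == ["Camel", "Case", "XYZ", "Model"]
assert _center_text("✅", 9) == "   ✅    "   # emoji counted as width 2: 3 spaces left, 4 right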
| 51
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
_UpperCamelCase : int = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
_UpperCamelCase : List[str] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
_UpperCamelCase : List[str] = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
_UpperCamelCase : List[str] = torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
            _UpperCamelCase : List[str] = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[str] = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
_UpperCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
_UpperCamelCase : Optional[Any] = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
_UpperCamelCase : str = torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
            _UpperCamelCase : List[Any] = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 51
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
def lowercase__ ( parent_a ,parent_b ) -> tuple[str, str]:
    """simple docstring"""
    _UpperCamelCase : Tuple = random.randint(0 ,len(parent_a ) - 1 )
    _UpperCamelCase : Dict = parent_a[:random_slice] + parent_b[random_slice:]
    _UpperCamelCase : Tuple = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
        _UpperCamelCase : Dict = population_score[random.randint(0 ,N_SELECTED )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
# Just some logs to know what the algorithms is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        _UpperCamelCase : int = [evaluate(item ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
        _UpperCamelCase : Optional[Any] = sorted(population_score ,key=lambda x : x[1] ,reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
# This is selection
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
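# A quick, illustrative sanity check of the helpers above (a minimal sketch;
# the seed and sample strings are arbitrary and not part of the original script):
#
#   random.seed(0)
#   assert evaluate("abc", "abd")[1] == 2.0   # two genes in the right position
#   child_1, child_2 = crossover("aaaa", "bbbb")
#   assert len(child_1) == len(child_2) == 4  # crossover preserves length
#   assert len(mutate("aaaa", ["a", "b"])) == 4  # so does mutation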
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Maps a fairseq parameter name to its transformers equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Renames all keys, splits the fused qkv projection, and extracts the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
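# Note on the split above (inferred from the slicing): fairseq stores the
# query/key/value projections fused into a single in_proj_weight of shape
# (3 * hidden_size, hidden_size), stacked in q, k, v order, so the three row
# slices recover the separate projection matrices transformers expects.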
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    """Builds a MusicgenDecoderConfig matching the given checkpoint size."""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
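# For reference, derived directly from the branches above:
#   decoder_config_from_checkpoint("small")  -> hidden 1024, 24 layers, 16 heads
#   decoder_config_from_checkpoint("medium") -> hidden 1536, 48 layers, 24 heads
#   decoder_config_from_checkpoint("large")  -> hidden 2048, 48 layers, 32 heads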
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Converts a fairseq MusicGen checkpoint to a transformers MusicgenForConditionalGeneration."""
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
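# Note on the sanity check above (inferred from the reshape): MusicGen decodes
# 4 EnCodec codebooks in parallel and flattens them into the batch dimension,
# so 2 sequences become 2 * 4 = 8 rows, with logits over 2048 audio codes.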
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
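# Note, inferred from the indexing above: these checkpoints store the parameters
# of all layers stacked ("scanned") along axis 1, which is why a single layer i
# is selected with `[:, i, ...]`; the reshape then merges the per-head axes
# (num_heads, head_dim) into a single hidden dimension.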
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
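# Background note: with a gated activation (the v1.1 "gated-GELU" variant), the
# MLP computes roughly act(x @ wi_0) * (x @ wi_1) @ wo instead of act(x @ wi) @ wo,
# which is why two input kernels must be converted per block when split_mlp_wi is set.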
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(old, i, "decoder").T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
"""simple docstring"""
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    """A small graph supporting breadth-first search and shortest-path queries."""
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breadth_first_search(self) -> None:
        """Traverses the graph from the source vertex, recording each vertex's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path(self, target_vertex: str) -> str:
        """Returns the path from source to target as 'A->B->C', raising if unreachable."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
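# Example (illustrative): get_module_path("tests/models/bert/test_modeling_bert.py")
# returns "tests.models.bert.test_modeling_bert".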
def get_test_module(test_file):
    """Import and return the test module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Collect all model tester classes defined in a test module."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Collect all test classes defined in a test module."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Collect all model classes covered by the test classes of a test module."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by a given test class, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Collect the test classes that cover a given model class."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Collect the model tester classes that cover a given model class."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Map each test class of a module to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """Map each model class of a module to the test classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Map each model class of a module to the model tester classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Recursively convert classes in an object to their names so it can be serialized."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
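# Illustrative example (the class names here are hypothetical):
#   to_json({FooModel: [FooModelTest]}) -> {"FooModel": ["FooModelTest"]}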
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """Performs autoregressive prediction of state, action and return over a few steps."""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
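# Note on the loop above: it mimics a Decision Transformer rollout. Each step
# appends a zero placeholder for the next action and reward, asks the model for
# an action prediction, then feeds the predicted action, the new (here random)
# state, and the decremented return-to-go back in for the next step.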
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase__ = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    """Returns True if every tensor in the list has the same shape."""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
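# Illustrative example: check_same_shape([torch.ones(2, 3), torch.zeros(2, 3)])
# is True, while mixing in a (3, 2) tensor would make it False.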
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
SCREAMING_SNAKE_CASE__ :Tuple = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=True,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)
    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)
    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)
    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # skip schedulers that are not compatible with this pipeline
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )
        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
            self.assertEqual(retriever.block_records[0], b"This is the first record")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"][:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Build a complete LED input dict, deriving any mask that was not supplied."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
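# Usage sketch for the helper above (illustrative values, not part of the
# original test file): with pad_token_id == 1, padding positions receive 0 in
# the derived attention mask and real tokens receive 1, e.g.
#
#     config = LEDConfig(pad_token_id=1)
#     inputs = prepare_led_inputs_dict(
#         config,
#         input_ids=tf.constant([[5, 7, 1, 1]]),        # two real tokens, two pads
#         decoder_input_ids=tf.constant([[2, 5, 7]]),
#     )
#     inputs["attention_mask"]  # -> [[1, 1, 0, 0]]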
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    """Wrap a python list of token ids in an int64 tf constant."""
    return tf.constant(tok_lst, dtype=tf.int64)
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
| 51
| 1
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
_UpperCamelCase : str = "<pad>"
_UpperCamelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]]
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
_UpperCamelCase : List[Any] = "Hello World!"
_UpperCamelCase : Optional[int] = [2, 3_1227, 4447, 35]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        text = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        expected_ids = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(expected_ids, self.big_tokenizer.encode(text))
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
# fmt: off
_UpperCamelCase : List[Any] = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="facebook/xglm-564M" , padding=__a , )
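# Note on `fairseq_offset` in the assertions above (a sketch, assuming the
# usual fairseq-style vocabulary layout; not taken from the tokenizer source):
# the raw SentencePiece ids are shifted to make room for control tokens placed
# at the front of the vocabulary, roughly:
#
#     sp_id = sp_model.piece_to_id(token)
#     hf_id = sp_id + tokenizer.fairseq_offset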
| 51
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
        tokenizer = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
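# Sketch of what the triple-id test above exercises (illustrative key names;
# the exact output keys come from the tokenizer implementation): RoCBert
# represents every token along three parallel channels that the model embeds
# and sums: token identity, glyph shape, and pronunciation. In this toy
# fixture all three files map each token to the same index, which is why the
# three id lists coincide:
#
#     tokens = tokenizer.tokenize("你好")                    # ["你", "好"]
#     tokenizer.convert_tokens_to_ids(tokens)                # [5, 6]
#     tokenizer.convert_tokens_to_shape_ids(tokens)          # [5, 6]
#     tokenizer.convert_tokens_to_pronunciation_ids(tokens)  # [5, 6]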
| 51
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
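# Back-of-the-envelope check for the defaults above (illustrative only; how a
# [CLS]-style token is handled depends on the model implementation): with
# image_size = [512, 864] and patch_size = 16, the backbone sees
# (512 // 16) * (864 // 16) = 32 * 54 = 1728 patch tokens, plus the 100
# learnable detection tokens appended for DETR-style set prediction.
num_patches = (512 // 16) * (864 // 16)  # 1728
sequence_length = num_patches + 100      # + num_detection_tokens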
| 51
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")
        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = self.get_tokenizer()
_UpperCamelCase : Dict = self.get_feature_extractor()
_UpperCamelCase : Dict = self.get_decoder()
_UpperCamelCase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : List[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_UpperCamelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : int = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(__a , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
_UpperCamelCase : Any = self.get_feature_extractor()
_UpperCamelCase : Dict = self.get_tokenizer()
_UpperCamelCase : str = self.get_decoder()
_UpperCamelCase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_UpperCamelCase : Union[str, Any] = floats_list((3, 1000) )
_UpperCamelCase : List[Any] = feature_extractor(__a , return_tensors="np" )
_UpperCamelCase : Optional[Any] = processor(__a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : str = self.get_feature_extractor()
_UpperCamelCase : List[str] = self.get_tokenizer()
_UpperCamelCase : Union[str, Any] = self.get_decoder()
_UpperCamelCase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_UpperCamelCase : Optional[int] = "This is a test string"
_UpperCamelCase : Optional[int] = processor(text=__a )
_UpperCamelCase : Any = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> List[Any]:
np.random.seed(__a )
return np.random.rand(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = self.get_feature_extractor()
_UpperCamelCase : List[Any] = self.get_tokenizer()
_UpperCamelCase : Optional[Any] = self.get_decoder()
_UpperCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_UpperCamelCase : Union[str, Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_UpperCamelCase : List[Any] = processor.decode(__a )
_UpperCamelCase : str = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Tuple ) -> Any:
_UpperCamelCase : str = self.get_feature_extractor()
_UpperCamelCase : int = self.get_tokenizer()
_UpperCamelCase : str = self.get_decoder()
_UpperCamelCase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_UpperCamelCase : Any = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCamelCase : List[str] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
_UpperCamelCase : List[str] = processor.batch_decode(__a , __a )
_UpperCamelCase : Any = list(__a )
with get_context("fork" ).Pool() as p:
_UpperCamelCase : Optional[int] = decoder.decode_beams_batch(__a , __a )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase : Dict = self.get_feature_extractor()
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : Optional[int] = self.get_decoder()
_UpperCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_UpperCamelCase : int = self._get_dummy_logits()
_UpperCamelCase : Tuple = 15
_UpperCamelCase : List[Any] = -20.0
_UpperCamelCase : int = -4.0
_UpperCamelCase : Union[str, Any] = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
_UpperCamelCase : List[str] = decoded_processor_out.text
_UpperCamelCase : Optional[int] = list(__a )
with get_context("fork" ).Pool() as pool:
_UpperCamelCase : List[str] = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
_UpperCamelCase : int = [d[0][0] for d in decoded_decoder_out]
_UpperCamelCase : List[Any] = [d[0][2] for d in decoded_decoder_out]
_UpperCamelCase : Optional[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , __a , atol=1e-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , __a , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
_UpperCamelCase : Optional[int] = self.get_feature_extractor()
_UpperCamelCase : Union[str, Any] = self.get_tokenizer()
_UpperCamelCase : Any = self.get_decoder()
_UpperCamelCase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
_UpperCamelCase : Union[str, Any] = self._get_dummy_logits()
_UpperCamelCase : Optional[Any] = 2.0
_UpperCamelCase : Union[str, Any] = 5.0
_UpperCamelCase : Tuple = -20.0
_UpperCamelCase : str = True
_UpperCamelCase : Any = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
_UpperCamelCase : str = decoded_processor_out.text
_UpperCamelCase : Union[str, Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("fork" ).Pool() as pool:
_UpperCamelCase : List[str] = decoder.decode_beams_batch(
__a , __a , )
_UpperCamelCase : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __a )
_UpperCamelCase : Tuple = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
_UpperCamelCase : List[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_UpperCamelCase : str = processor.decoder.model_container[processor.decoder._model_key]
_UpperCamelCase : Optional[Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
_UpperCamelCase : Dict = os.listdir(__a )
_UpperCamelCase : Dict = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : List[str] = snapshot_download("hf-internal-testing/processor_with_lm" )
_UpperCamelCase : int = WavaVecaProcessorWithLM.from_pretrained(__a )
_UpperCamelCase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
_UpperCamelCase : Dict = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
_UpperCamelCase : Dict = os.listdir(__a )
_UpperCamelCase : Optional[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : List[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_UpperCamelCase : Dict = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
_UpperCamelCase : Dict = floats_list((3, 1000) )
_UpperCamelCase : Optional[Any] = processor_wavaveca(__a , return_tensors="np" )
_UpperCamelCase : Dict = processor_auto(__a , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_UpperCamelCase : List[str] = self._get_dummy_logits()
_UpperCamelCase : str = processor_wavaveca.batch_decode(__a )
_UpperCamelCase : str = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
_UpperCamelCase : Any = self.get_feature_extractor()
_UpperCamelCase : Optional[int] = self.get_tokenizer()
_UpperCamelCase : Any = self.get_decoder()
_UpperCamelCase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : Optional[Any] , __a : Optional[int] ) -> int:
_UpperCamelCase : Dict = [d[key] for d in offsets]
return retrieved_list
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
_UpperCamelCase : Any = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_UpperCamelCase : int = self._get_dummy_logits()[0]
_UpperCamelCase : List[str] = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_UpperCamelCase : List[Any] = self._get_dummy_logits()
_UpperCamelCase : str = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(__a , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
import torch
_UpperCamelCase : Optional[int] = load_dataset("common_voice" , "en" , split="train" , streaming=__a )
_UpperCamelCase : Optional[int] = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6000 ) )
_UpperCamelCase : Any = iter(__a )
_UpperCamelCase : Union[str, Any] = next(__a )
_UpperCamelCase : Optional[Any] = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
_UpperCamelCase : List[Any] = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_UpperCamelCase : int = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(__a ).logits.cpu().numpy()
_UpperCamelCase : Dict = processor.decode(logits[0] , output_word_offsets=__a )
_UpperCamelCase : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_UpperCamelCase : str = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
_UpperCamelCase : Dict = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(__a , "word" ) ) , __a )
self.assertEqual(" ".join(self.get_from_offsets(__a , "word" ) ) , output.text )
# output times
_UpperCamelCase : List[str] = torch.tensor(self.get_from_offsets(__a , "start_time" ) )
_UpperCamelCase : List[Any] = torch.tensor(self.get_from_offsets(__a , "end_time" ) )
# fmt: off
_UpperCamelCase : List[Any] = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
_UpperCamelCase : Tuple = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
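# Sketch of the offset-to-time conversion exercised above (numbers are
# illustrative): CTC emits one logit frame per `inputs_to_logits_ratio` input
# samples, so a frame offset converts to seconds through the sampling rate.
#
#     time_offset = model.config.inputs_to_logits_ratio / sampling_rate
#     # e.g. 320 / 16000 = 0.02 s per frame for a wav2vec2-base-style model
#     start_time = word_offset["start_offset"] * time_offset
#     end_time = word_offset["end_offset"] * time_offset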
| 51
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decrypt `ciphertext` with a repeating `key`; None if any char is invalid."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Collect every candidate plaintext whose characters are all valid."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only candidates that contain `common_word` (case-insensitive)."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: sum of the ordinals of the decrypted text."""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
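# Tiny round-trip check of the XOR scheme above (illustrative helper, not part
# of the Project Euler input): encrypting and decrypting with the same key is
# the identity, because (c ^ k) ^ k == c.
def _demo_roundtrip() -> bool:
    plaintext = "the cat"
    key = (ord("a"), ord("b"), ord("c"))
    ciphertext = [ord(char) ^ k for char, k in zip(plaintext, cycle(key))]
    return try_key(ciphertext, key) == plaintext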
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 1
|
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
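# Added illustration (not from the original script, which relies on the
# emmental binarizers): the same "tensor * mask" pattern reproduced with plain
# torch, keeping the top half of the weights by absolute magnitude.
def _magnitude_pruning_demo() -> None:
    weight = torch.randn(4, 4)
    k = weight.numel() // 2
    cutoff = weight.abs().flatten().kthvalue(k).values  # k-th smallest |w|
    mask = (weight.abs() > cutoff).to(weight.dtype)
    pruned = weight * mask
    assert (pruned == 0).sum() >= k  # at least half of the entries are zeroed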
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = len(lowercase_ )
print("The following activities are selected:" )
# The first activity is always selected
_UpperCamelCase : List[Any] = 0
print(lowercase_ ,end="," )
# Consider rest of the activities
for j in range(lowercase_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase_ ,end="," )
_UpperCamelCase : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
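# Added variant (not in the original file): the same greedy scan, returning
# the chosen indices instead of printing them, e.g.
# max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4].
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always selected
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected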
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def lowercase__ ( lowercase_ ) -> dict:
"""simple docstring"""
_UpperCamelCase : int = script.contents[0]
_UpperCamelCase : List[str] = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __a : int ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = F'''https://www.instagram.com/{username}/'''
_UpperCamelCase : Dict = self.get_json()
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> dict:
_UpperCamelCase : Optional[Any] = requests.get(self.url , headers=__a ).text
_UpperCamelCase : str = BeautifulSoup(__a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["username"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
return self.user_data["full_name"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
return self.user_data["biography"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return self.user_data["business_email"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["external_url"]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
return self.user_data["is_verified"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> bool:
return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Optional[Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Decoded output sample of the model.
    """

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE: each encoder output vector is
    replaced by its nearest codebook embedding.
    """

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution:
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
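# Added illustration (not part of the original module): the reparameterization
# trick used by DiagonalGaussianDistribution.sample(), written out standalone
# with plain torch so it can run without the surrounding package.
def _diagonal_gaussian_demo() -> None:
    parameters = torch.randn(1, 8, 4, 4)  # 4 mean channels + 4 log-variance channels
    mean, logvar = torch.chunk(parameters, 2, dim=1)
    std = torch.exp(0.5 * torch.clamp(logvar, -30.0, 20.0))
    z = mean + std * torch.randn_like(mean)  # z = mu + sigma * eps
    assert z.shape == mean.shape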
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ ) -> float:
"""simple docstring"""
_UpperCamelCase : str = 0.00
_UpperCamelCase : int = 0
for resistor in resistors:
if resistor <= 0:
_UpperCamelCase : str = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(lowercase_ )
first_sum += 1 / float(lowercase_ )
index += 1
return 1 / first_sum
def lowercase__ ( lowercase_ ) -> float:
"""simple docstring"""
_UpperCamelCase : str = 0.00
_UpperCamelCase : Union[str, Any] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_UpperCamelCase : int = F'''Resistor at index {index} has a negative value!'''
raise ValueError(lowercase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
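    # Added usage example (not in the original file): two 2-ohm resistors give
    # 1.0 ohm in parallel and 4.0 ohms in series.
    print(resistor_parallel([2, 2]))
    print(resistor_series([2, 2]))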
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
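# Added usage sketch (not part of the original module): mapping a dataset
# whose columns are named "article"/"highlights" onto the canonical
# "text"/"summary" schema.
if __name__ == "__main__":
    task = Summarization(text_column="article", summary_column="highlights")
    assert task.column_mapping == {"article": "text", "highlights": "summary"}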
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
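# Added note (not part of the original script): the manual truncation in the
# evaluation loop above can be replaced with a single call, as its inline
# comment already suggests:
#
#     predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#     metric.add_batch(predictions=predictions, references=references)
#
# `gather_for_metrics` drops the duplicated samples that pad out the last
# uneven batch on distributed systems, so no `samples_seen` bookkeeping is needed.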
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = set()
# edges = list of graph's edges
_UpperCamelCase : Union[str, Any] = get_edges(lowercase_ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
_UpperCamelCase, _UpperCamelCase : str = edges.pop()
chosen_vertices.add(lowercase_ )
chosen_vertices.add(lowercase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase_ )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : List[str] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
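# Added usage sketch (mirrors the slow tests above; assumes a CUDA device and
# access to the same Hub checkpoint the tests use):
#
#     unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#     pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
#     image = pipe(num_inference_steps=1).images[0]  # one-step generation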
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
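# Added usage sketch (not part of this __init__ module): the lazy module makes
# the classes above importable from the package root, e.g. for zero-shot
# object detection (checkpoint name and call pattern shown as an illustration):
#
#     from transformers import OwlViTForObjectDetection, OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
#     outputs = model(**inputs)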
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
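# Added illustration (not part of the test file): the tiny configuration used
# by MPNetModelTester can also be exercised directly; the shapes follow the
# tester's defaults (batch_size=13, seq_length=7, hidden_size=64):
#
#     config = MPNetModelTester(None).get_config()
#     model = MPNetModel(config)
#     out = model(torch.randint(0, config.vocab_size, (13, 7)))
#     assert out.last_hidden_state.shape == (13, 7, 64)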
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count the values t <= t_limit that can be laid out as a hollow square
    lamina in between 1 and n_limit distinct ways.
    """
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : int , __a : str , __a : List[Any]=2 , __a : int=8 , __a : Dict=True , __a : Tuple=True , __a : List[str]=True , __a : Dict=True , __a : List[str]=99 , __a : Union[str, Any]=16 , __a : Any=5 , __a : Any=2 , __a : Any=36 , __a : Tuple="gelu" , __a : Dict=0.0 , __a : Optional[int]=0.0 , __a : str=512 , __a : Optional[int]=16 , __a : Dict=2 , __a : Dict=0.02 , __a : Any=3 , __a : List[str]=4 , __a : Optional[int]=None , ) -> Any:
_UpperCamelCase : Any = parent
_UpperCamelCase : Union[str, Any] = batch_size
_UpperCamelCase : Optional[int] = seq_length
_UpperCamelCase : Union[str, Any] = is_training
_UpperCamelCase : Optional[int] = use_input_mask
_UpperCamelCase : Dict = use_token_type_ids
_UpperCamelCase : int = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : Optional[Any] = hidden_act
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Union[str, Any] = max_position_embeddings
_UpperCamelCase : Tuple = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : List[str] = num_labels
_UpperCamelCase : Dict = num_choices
_UpperCamelCase : Any = scope
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = None
if self.use_input_mask:
_UpperCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : List[str] = None
if self.use_token_type_ids:
_UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase : List[Any] = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : List[str] = None
if self.use_labels:
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    # NOTE: the attribute names below are reconstructed from the common
    # ModelTesterMixin configuration; the values come from the original file.
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
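
# For reference, a toy illustration of the "expected slice" pattern used in the
# integration tests above (values here are made up, not real model outputs):
#
#   import torch
#   output = torch.zeros(1, 256, 50265)
#   expected_slice = output[:, :3, :3].clone()
#   assert torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)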
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the value to the bucket; succeeds if the bucket is empty or the key matches."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(
            f"{item.key}: {item.val}" for item in self._buckets if item
        )
        return f"HashMap({val_string})"
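
# A short usage sketch (illustrative values, not part of the original file):
if __name__ == "__main__":
    hm = HashMap()
    hm["apple"] = 1
    hm["banana"] = 2
    assert hm["apple"] == 1 and len(hm) == 2
    del hm["apple"]
    print(hm)  # HashMap(banana: 2)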
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Normalize a (left, top, right, bottom) box onto the 0-1000 grid used by LayoutLM-style models."""
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Apply Tesseract OCR on a document image and return recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
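
# Worked example for the normalization above (toy numbers): a box at
# (10, 20, 110, 70) on a 200x100 image maps onto the 0-1000 grid as
# [50, 200, 550, 700], i.e. int(1000 * coordinate / image_extent):
# assert normalize_box([10, 20, 110, 70], 200, 100) == [50, 200, 550, 700]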
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    # NOTE: the class name is reconstructed; the preprocessing below matches the
    # LayoutLMv3-style document image processor (resize + rescale + normalize + OCR).
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
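
# Hedged usage sketch (assumes pytesseract is installed and a local document
# image exists; file name is illustrative, not part of the original file):
#
#   from PIL import Image
#   processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#   encoding = processor.preprocess(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   print(encoding["pixel_values"].shape, encoding["words"])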
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start:end + 1] in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if any contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
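
# Quick sanity check (illustrative values): the prefix sums of [1, 2, 3] are
# [1, 3, 6], so get_sum(1, 2) == 6 - 1 == 5 and contains_sum(5) is True.
# ps = PrefixSum([1, 2, 3]); assert ps.get_sum(1, 2) == 5 and ps.contains_sum(5)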
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so bypass the generated __setattr__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
    def column_mapping(self) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) is the parameter name expected by `get_artifacts_links`.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and unzip the requested artifacts, returning their file contents."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
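
# Hedged usage sketch (requires a valid GitHub token; the artifact name and
# output directory are illustrative):
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"], output_dir="ci_artifacts", token=os.environ["GITHUB_TOKEN"]
#   )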
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
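
# Typical invocation through fire (paths and line count are illustrative):
#   python minify.py --src_dir data/full --dest_dir data/mini --n 1000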
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest) weight vector for a sample by squared distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        """Pull the winning weight vector toward the sample by the learning rate alpha."""
        # Iterate over the feature dimensions of the sample.
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training samples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Set the weight (and optionally bias) of a single torch layer."""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1)
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1)
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
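
# Example invocation (file paths are illustrative):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path model.pkl --config_file config.json --pytorch_dump_path pytorch_model.bin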
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` inside a table cell of size `width`."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary of model names to configs.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
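
# As the header comment notes, this script is meant to be run from the repo root:
#   python utils/check_table.py                      # check only
#   python utils/check_table.py --fix_and_overwrite  # rewrite the table in docs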
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    """Build the sample tree 1(2(4, 5), 3)."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None) -> int:
"""simple docstring"""
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Return a breadth-first (level order) traversal of the tree."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Return the nodes of a given level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Return the nodes of a given level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag (spiral) level order traversal of the tree."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
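
# For the sample tree built in make_tree() above, the traversals evaluate to:
#   inorder   -> [4, 2, 5, 1, 3]
#   preorder  -> [1, 2, 4, 5, 3]
#   postorder -> [4, 5, 2, 3, 1]
#   zigzag    -> [[1], [3, 2], [4, 5]]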
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. A bigger population could converge faster but uses more memory.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by how many of its characters match the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate one random gene of the child with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    """Select, cross over and mutate new children from the given parent."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside the genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate a random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #        max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
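
# For intuition (illustrative values): evaluate("abc", "abd") matches 2 of 3
# characters, so it returns ("abc", 2.0).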
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    """
    Min-heap that supports decrease-key on stored nodes.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
lowerCamelCase__ = Node("R", -1)
lowerCamelCase__ = Node("B", 6)
lowerCamelCase__ = Node("A", 3)
lowerCamelCase__ = Node("X", 1)
lowerCamelCase__ = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
lowerCamelCase__ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
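
# For intuition (values from the demo above): before decrease_key, peek()
# returns Node(R, -1); after decrease_key(b, -17), B bubbles to the root and
# peek() returns Node(B, -17).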
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
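# Rename all state-dict keys, split the fused in_proj q/k/v weights into separate projections,
# and collect the encoder-decoder projection weights in their own dict.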
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
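# Recover the decoder hyper-parameters (hidden size, layers, attention heads) for each released checkpoint size.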
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
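# Load the fairseq MusicGen checkpoint, port the weights, sanity-check a forward pass,
# then optionally save the converted model/processor and push them to the Hub.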
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = "Muhammad Umer Farooq"
lowerCamelCase__ = "MIT"
lowerCamelCase__ = "1.0.0"
lowerCamelCase__ = "Muhammad Umer Farooq"
lowerCamelCase__ = "contact@muhammadumerfarooq.me"
lowerCamelCase__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser ( HTMLParser ):
'''simple docstring'''
def __init__( self : List[Any] , domain : str ) -> None:
super().__init__()
self.urls : list[str] = []
self.domain : str = domain
def handle_starttag( self : List[Any] , tag : str , attrs : list[tuple[str, str | None]] ) -> None:
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined and is neither empty nor just "#", process it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
url = parse.urljoin(self.domain , value )
self.urls.append(url )
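# Return the registered domain (e.g. "github.com") from a URL's network location.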
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
return ".".join(get_sub_domain_name(lowercase_ ).split("." )[-2:] )
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
return parse.urlparse(lowercase_ ).netloc
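# Crawl the given page, follow each collected anchor once, and return a sorted list of
# unique e-mail addresses found for the page's domain.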
def lowercase__ ( lowercase_ = "https://github.com" ) -> list[str]:
"""simple docstring"""
_UpperCamelCase : int = get_domain_name(lowercase_ )
# Initialize the parser
_UpperCamelCase : Any = Parser(lowercase_ )
try:
# Open URL
_UpperCamelCase : Optional[int] = requests.get(lowercase_ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_UpperCamelCase : Union[str, Any] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_UpperCamelCase : Optional[int] = requests.get(lowercase_ )
# Get the valid email.
_UpperCamelCase : Union[str, Any] = re.findall("[a-zA-Z0-9]+@" + domain ,read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(lowercase_ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
url = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
soup = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
image_url = soup.find("meta", {"property": "og:image"})["content"]
image_data = requests.get(image_url).content
file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE__ :Tuple = "CLIPImageProcessor"
SCREAMING_SNAKE_CASE__ :Optional[int] = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : int , __a : Any=None , __a : Optional[Any]=None , **__a : Any ) -> Optional[int]:
_UpperCamelCase : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_UpperCamelCase : Optional[Any] = kwargs.pop("feature_extractor" )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Any , __a : int=None , __a : Optional[int]=None , __a : List[Any]=None , **__a : Union[str, Any] ) -> Union[str, Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
encoding = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
image_features = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
encoding["pixel_values"] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def __SCREAMING_SNAKE_CASE ( self : Tuple , *__a : int , **__a : Tuple ) -> int:
return self.tokenizer.batch_decode(*__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , *__a : Tuple , **__a : int ) -> int:
return self.tokenizer.decode(*__a , **__a )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F'''{test_file} instead.''' )
_UpperCamelCase : str = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
_UpperCamelCase : Dict = components[:-1] + [test_fn.replace(".py" ,"" )]
_UpperCamelCase : List[str] = ".".join(lowercase_ )
return test_module_path
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_module_path(lowercase_ )
_UpperCamelCase : str = importlib.import_module(lowercase_ )
return test_module
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[Any] = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(lowercase_ ,lowercase_ ) )
# sort with class names
return sorted(tester_classes ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Any = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
_UpperCamelCase : int = getattr(lowercase_ ,lowercase_ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_UpperCamelCase : Optional[Any] = getattr(lowercase_ ,"all_model_classes" ,[] )
if len(lowercase_ ) > 0:
test_classes.append(lowercase_ )
# sort with class names
return sorted(test_classes ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Dict = get_test_classes(lowercase_ )
_UpperCamelCase : int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(model_classes ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = test_class()
if hasattr(lowercase_ ,"setUp" ):
test.setUp()
_UpperCamelCase : Tuple = None
if hasattr(lowercase_ ,"model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCamelCase : Tuple = test.model_tester.__class__
return model_tester
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = get_test_classes(lowercase_ )
_UpperCamelCase : Dict = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowercase_ )
# sort with class names
return sorted(target_test_classes ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes_for_model(lowercase_ ,lowercase_ )
_UpperCamelCase : List[Any] = []
for test_class in test_classes:
_UpperCamelCase : List[Any] = get_model_tester_from_test_class(lowercase_ )
if tester_class is not None:
tester_classes.append(lowercase_ )
# sort with class names
return sorted(tester_classes ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes(lowercase_ )
_UpperCamelCase : Tuple = {test_class: get_model_tester_from_test_class(lowercase_ ) for test_class in test_classes}
return test_tester_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Optional[int] = {
model_class: get_test_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_test_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Tuple = {
model_class: get_tester_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_to_tester_mapping
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowercase_ ,lowercase_ ):
return o
elif isinstance(lowercase_ ,lowercase_ ):
return o.__name__
elif isinstance(lowercase_ ,(list, tuple) ):
return [to_json(x ) for x in o]
elif isinstance(lowercase_ ,lowercase_ ):
return {to_json(k ): to_json(v ) for k, v in o.items()}
else:
return o
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
lowerCamelCase__ = {"mobilebert-uncased": 512}
lowerCamelCase__ = {}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ :int = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ :List[str] = MobileBertTokenizer
def __init__( self : str , __a : str=None , __a : List[Any]=None , __a : str=True , __a : Optional[Any]="[UNK]" , __a : Dict="[SEP]" , __a : Any="[PAD]" , __a : Dict="[CLS]" , __a : Dict="[MASK]" , __a : Dict=True , __a : Dict=None , **__a : List[Any] , ) -> Any:
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , )
_UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __a ) != do_lower_case
or normalizer_state.get("strip_accents" , __a ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __a ) != tokenize_chinese_chars
):
_UpperCamelCase : Optional[int] = getattr(__a , normalizer_state.pop("type" ) )
_UpperCamelCase : Any = do_lower_case
_UpperCamelCase : Union[str, Any] = strip_accents
_UpperCamelCase : List[Any] = tokenize_chinese_chars
_UpperCamelCase : str = normalizer_class(**__a )
_UpperCamelCase : List[Any] = do_lower_case
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[Any] , __a : Optional[Any]=None ) -> Tuple:
_UpperCamelCase : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
_UpperCamelCase : Any = [self.sep_token_id]
_UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self : str , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
_UpperCamelCase : int = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from collections import defaultdict
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str , __a : List[str] , __a : Tuple ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
_UpperCamelCase : Tuple = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(__a ) )
]
_UpperCamelCase : List[Any] = defaultdict(__a ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
_UpperCamelCase : List[Any] = (1 << len(__a )) - 1
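# Recursively count assignments: `mask` records which persons already hold a task,
# `task_no` is the next task to hand out; results are memoised in the DP table.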
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[int] , __a : Any ) -> Optional[int]:
# if mask == self.final_mask, every person has been assigned a task: one valid arrangement
if mask == self.final_mask:
return 1
# if tasks run out before everyone has been assigned one, this arrangement fails
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't assign this task to anyone in the arrangement
_UpperCamelCase : Optional[Any] = self.count_ways_until(__a , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
_UpperCamelCase : Union[str, Any] = total_ways_util
return self.dp[mask][task_no]
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple ) -> List[str]:
# Store the list of persons for each task
for i in range(len(__a ) ):
for j in task_performed[i]:
self.task[j].append(__a )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
lowerCamelCase__ = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
lowerCamelCase__ = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
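# A basic transformer block: self-attention, optional cross-attention, and a feed-forward
# network, each preceded by its own (optionally adaptive) normalization layer.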
@maybe_allow_in_graph
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , __a : int , __a : int , __a : int , __a : Dict=0.0 , __a : Optional[int] = None , __a : str = "geglu" , __a : Optional[int] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : str = "layer_norm" , __a : bool = False , ) -> List[str]:
super().__init__()
_UpperCamelCase : Tuple = only_cross_attention
_UpperCamelCase : List[Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
_UpperCamelCase : List[Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_UpperCamelCase : Union[str, Any] = AdaLayerNorm(__a , __a )
elif self.use_ada_layer_norm_zero:
_UpperCamelCase : Any = AdaLayerNormZero(__a , __a )
else:
_UpperCamelCase : Optional[Any] = nn.LayerNorm(__a , elementwise_affine=__a )
_UpperCamelCase : Union[str, Any] = Attention(
query_dim=__a , heads=__a , dim_head=__a , dropout=__a , bias=__a , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__a , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_UpperCamelCase : int = (
AdaLayerNorm(__a , __a )
if self.use_ada_layer_norm
else nn.LayerNorm(__a , elementwise_affine=__a )
)
_UpperCamelCase : Optional[Any] = Attention(
query_dim=__a , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__a , dim_head=__a , dropout=__a , bias=__a , upcast_attention=__a , ) # is self-attn if encoder_hidden_states is none
else:
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Optional[int] = None
# 3. Feed-forward
_UpperCamelCase : List[Any] = nn.LayerNorm(__a , elementwise_affine=__a )
_UpperCamelCase : Any = FeedForward(__a , dropout=__a , activation_fn=__a , final_dropout=__a )
# let chunk size default to None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Optional[Any] = 0
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[int] , __a : int ) -> Tuple:
# Sets chunk feed-forward
_UpperCamelCase : List[str] = chunk_size
_UpperCamelCase : Tuple = dim
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : torch.FloatTensor , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.LongTensor] = None , __a : Dict[str, Any] = None , __a : Optional[torch.LongTensor] = None , ) -> Optional[Any]:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
_UpperCamelCase : int = self.norma(__a , __a )
elif self.use_ada_layer_norm_zero:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = self.norma(
__a , __a , __a , hidden_dtype=hidden_states.dtype )
else:
_UpperCamelCase : str = self.norma(__a )
_UpperCamelCase : Optional[int] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_UpperCamelCase : Optional[int] = self.attna(
__a , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__a , **__a , )
if self.use_ada_layer_norm_zero:
_UpperCamelCase : Optional[int] = gate_msa.unsqueeze(1 ) * attn_output
_UpperCamelCase : Tuple = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_UpperCamelCase : str = (
self.norma(__a , __a ) if self.use_ada_layer_norm else self.norma(__a )
)
_UpperCamelCase : Any = self.attna(
__a , encoder_hidden_states=__a , attention_mask=__a , **__a , )
_UpperCamelCase : List[Any] = attn_output + hidden_states
# 3. Feed-forward
_UpperCamelCase : Optional[int] = self.norma(__a )
if self.use_ada_layer_norm_zero:
_UpperCamelCase : str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
_UpperCamelCase : str = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_UpperCamelCase : Dict = torch.cat(
[self.ff(__a ) for hid_slice in norm_hidden_states.chunk(__a , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_UpperCamelCase : Optional[int] = self.ff(__a )
if self.use_ada_layer_norm_zero:
_UpperCamelCase : Any = gate_mlp.unsqueeze(1 ) * ff_output
_UpperCamelCase : Tuple = ff_output + hidden_states
return hidden_states
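# Position-wise feed-forward network: input projection with a GELU/GEGLU-style activation,
# dropout, and an output projection (with optional final dropout).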
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : int , __a : Optional[int] = None , __a : int = 4 , __a : float = 0.0 , __a : str = "geglu" , __a : bool = False , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = int(dim * mult )
_UpperCamelCase : List[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_UpperCamelCase : Optional[Any] = GELU(__a , __a )
if activation_fn == "gelu-approximate":
_UpperCamelCase : Dict = GELU(__a , __a , approximate="tanh" )
elif activation_fn == "geglu":
_UpperCamelCase : Dict = GEGLU(__a , __a )
elif activation_fn == "geglu-approximate":
_UpperCamelCase : List[str] = ApproximateGELU(__a , __a )
_UpperCamelCase : Union[str, Any] = nn.ModuleList([] )
# project in
self.net.append(__a )
# project dropout
self.net.append(nn.Dropout(__a ) )
# project out
self.net.append(nn.Linear(__a , __a ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Tuple ) -> List[str]:
for module in self.net:
_UpperCamelCase : int = module(__a )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : int , __a : int , __a : int , __a : str = "none" ) -> List[str]:
super().__init__()
_UpperCamelCase : Optional[Any] = nn.Linear(__a , __a )
_UpperCamelCase : List[str] = approximate
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : int ) -> Dict:
if gate.device.type != "mps":
return F.gelu(__a , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : int ) -> Tuple:
_UpperCamelCase : int = self.proj(__a )
_UpperCamelCase : List[Any] = self.gelu(__a )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , __a : int , __a : int ) -> str:
super().__init__()
_UpperCamelCase : Optional[Any] = nn.Linear(__a , dim_out * 2 )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Optional[Any] ) -> List[str]:
if gate.device.type != "mps":
return F.gelu(__a )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Tuple ) -> Optional[int]:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = self.proj(__a ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__a )
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : int , __a : int , __a : int ) -> Any:
super().__init__()
_UpperCamelCase : str = nn.Linear(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Any ) -> str:
_UpperCamelCase : Dict = self.proj(__a )
return x * torch.sigmoid(1.7_02 * x )
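# Adaptive layer norm: a per-embedding scale and shift modulate a LayerNorm that has no affine parameters.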
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : List[str] , __a : Tuple ) -> Union[str, Any]:
super().__init__()
_UpperCamelCase : Dict = nn.Embedding(__a , __a )
_UpperCamelCase : Optional[int] = nn.SiLU()
_UpperCamelCase : Tuple = nn.Linear(__a , embedding_dim * 2 )
_UpperCamelCase : str = nn.LayerNorm(__a , elementwise_affine=__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : str , __a : List[Any] ) -> List[Any]:
_UpperCamelCase : Optional[int] = self.linear(self.silu(self.emb(__a ) ) )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = torch.chunk(__a , 2 )
_UpperCamelCase : List[Any] = self.norm(__a ) * (1 + scale) + shift
return x
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __a : List[Any] , __a : Tuple ) -> List[Any]:
super().__init__()
_UpperCamelCase : Any = CombinedTimestepLabelEmbeddings(__a , __a )
_UpperCamelCase : List[str] = nn.SiLU()
_UpperCamelCase : Optional[int] = nn.Linear(__a , 6 * embedding_dim , bias=__a )
_UpperCamelCase : List[str] = nn.LayerNorm(__a , elementwise_affine=__a , eps=1e-6 )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str , __a : Optional[Any] , __a : Dict , __a : Tuple=None ) -> int:
_UpperCamelCase : Tuple = self.linear(self.silu(self.emb(__a , __a , hidden_dtype=__a ) ) )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = emb.chunk(6 , dim=1 )
_UpperCamelCase : Tuple = self.norm(__a ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : str , __a : int , __a : int , __a : int , __a : Optional[str] = None , __a : float = 1e-5 ) -> Dict:
super().__init__()
_UpperCamelCase : Optional[int] = num_groups
_UpperCamelCase : List[str] = eps
if act_fn is None:
_UpperCamelCase : int = None
else:
_UpperCamelCase : List[str] = get_activation(__a )
_UpperCamelCase : List[Any] = nn.Linear(__a , out_dim * 2 )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Union[str, Any] , __a : Any ) -> Tuple:
if self.act:
_UpperCamelCase : Any = self.act(__a )
_UpperCamelCase : Optional[Any] = self.linear(__a )
_UpperCamelCase : Tuple = emb[:, :, None, None]
_UpperCamelCase, _UpperCamelCase : Optional[int] = emb.chunk(2 , dim=1 )
_UpperCamelCase : Optional[int] = F.group_norm(__a , self.num_groups , eps=self.eps )
_UpperCamelCase : Optional[Any] = x * (1 + scale) + shift
return x
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
# Realm tok
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase : int = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase : List[Any] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase : int = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# TODO: Head-masking not yet implemented
pass
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return tf.constant(lowercase_ ,dtype=tf.intaa )
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
"""simple docstring"""
def lowercase__ ( lowercase_ = 1_000_000 ) -> int:
"""simple docstring"""
primes = set(range(3 ,lowercase_ ,2 ) )
primes.add(2 )
for p in range(3 ,lowercase_ ,2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p ,lowercase_ ,p ) ) )
phi = [float(n ) for n in range(lowercase_ + 1 )]
for p in primes:
for n in range(p ,lowercase_ + 1 ,p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
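
# --- Added example (illustrative) ---
# Hedged sketch of driving RoCBertTokenizer directly; the three file paths are
# hypothetical stand-ins for the vocab/shape/pronunciation files written in setUp().
tokenizer = RoCBertTokenizer("vocab.txt", "word_shape.json", "word_pronunciation.json")
print(tokenizer.tokenize("你好[SEP]你是谁"))  # ['你', '好', '[SEP]', '你', '是', '谁']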
"""simple docstring"""
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n_vertices: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """simple docstring"""
    g: list[list[int]] = [[] for _ in range(n_vertices)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
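
# --- Added example (illustrative) ---
# Quick usage sketch: the SCCs of a 3-cycle plus an isolated sink, using the
# helpers defined above.
demo_g = create_graph(4, [(0, 1), (1, 2), (2, 0), (2, 3)])
print(tarjan(demo_g))  # [[3], [2, 1, 0]]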
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "yolos"

    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
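
# --- Added example (illustrative) ---
# Minimal usage sketch of the config class defined above.
config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
print(config.hidden_size, config.num_detection_tokens)  # 768 100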
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
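
# --- Added example (illustrative) ---
# Usage sketch showing the attribute_map indirection defined above.
config = TableTransformerConfig(d_model=256, encoder_attention_heads=8)
assert config.hidden_size == 256           # property redirects to d_model
assert config.num_attention_heads == 8     # property redirects to encoder_attention_heads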
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """simple docstring"""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCamelCase__ = "src/transformers"
# Matches is_xxx_available()
lowerCamelCase__ = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase__ = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase__ = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowerCamelCase__ = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase__ = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase__ = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase__ = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase__ = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase__ = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowerCamelCase__ = re.compile(R"^\s*try:")
# Catches a line with else:
lowerCamelCase__ = re.compile(R"^\s*else:")
def find_backend(line):
    """simple docstring"""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """simple docstring"""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(R"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + "\""):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + "\""):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + "\""):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """simple docstring"""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'''  {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'''  {a} in _import_structure but not in TYPE_HINT.''')
    return errors
def check_all_inits():
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """simple docstring"""
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS],)
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(F'''- {module}''' for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F'''{list_of_modules}\n'''
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
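
# --- Added example (illustrative) ---
# What find_backend() extracts from a conditional-import guard, using the
# regexes defined above.
demo_line = "if not is_torch_available() and not is_vision_available():"
assert find_backend(demo_line) == "torch_and_vision"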
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """simple docstring"""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
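
# --- Added example (illustrative) ---
# Variant of the same greedy selection that returns indices instead of printing;
# like the original, it assumes activities are pre-sorted by finish time.
def select_max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected


assert select_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]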
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    '''simple docstring'''
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
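
# --- Added example (illustrative) ---
# Hedged usage sketch; task templates like this existed in datasets < 3.0 and
# were removed in later releases.
task = ImageClassification(image_column="img", label_column="label")
print(task.column_mapping)  # {'img': 'image', 'label': 'labels'}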
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
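
# --- Added example (illustrative) ---
# Hedged sketch of the diagonal-Gaussian reparameterization used by the last
# class above: the parameter tensor packs mean and log-variance along the
# channel axis, and sampling draws mean + std * eps.
params = torch.randn(1, 8, 4, 4)                     # 4 mean channels + 4 logvar channels
mean, logvar = torch.chunk(params, 2, dim=1)
std = torch.exp(0.5 * logvar.clamp(-30.0, 20.0))
z = mean + std * torch.randn_like(std)
print(z.shape)                                       # torch.Size([1, 4, 4, 4])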
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """simple docstring"""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """simple docstring"""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None,) -> List[Tuple[slice, ...]]:
    """simple docstring"""
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]
    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)
    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]],))

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :],))

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())
    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """simple docstring"""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims,)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False,) -> Any:
    """simple docstring"""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims),)
        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class ChunkSizeTuner:
    '''simple docstring'''
    def __init__( self, max_chunk_size: int = 512,):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int,) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size,)
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
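
# --- Added example (illustrative) ---
# Toy illustration of the chunking idea behind chunk_layer(): apply a layer over
# slices of the flattened batch and write each result into a pre-allocated output.
def _demo_layer(x: torch.Tensor) -> torch.Tensor:
    return x * 2


_x = torch.arange(10.0).reshape(10, 1)
_out = torch.empty_like(_x)
for _i in range(0, _x.shape[0], 4):                 # chunk_size = 4
    _out[_i : _i + 4] = _demo_layer(_x[_i : _i + 4])
assert torch.equal(_out, _demo_layer(_x))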
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
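
# Hedged usage sketch (added): instantiate the template with dataset-specific
# column names and read back the canonical mapping used to rename columns.
#
#   template = Summarization(text_column="article", summary_column="highlights")
#   template.column_mapping  # -> {"article": "text", "highlights": "summary"}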
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()

class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name reconstructed; the original identifier was obfuscated

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
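
# Hedged usage sketch distilled from the integration test above (assumes a CUDA
# device and network access to the facebook/DiT-XL-2-256 checkpoint):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_ids, num_inference_steps=40, output_type="np").images[0]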
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = set()
# edges = list of graph's edges
_UpperCamelCase : Union[str, Any] = get_edges(lowercase_ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
_UpperCamelCase, _UpperCamelCase : str = edges.pop()
chosen_vertices.add(lowercase_ )
chosen_vertices.add(lowercase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase_ )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : List[str] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = torch.load(lowercase_ ,map_location="cpu" )
if "model" in sd.keys():
_UpperCamelCase : Union[str, Any] = torch.load(lowercase_ ,map_location="cpu" )["model"]
# pop unnecessary weights
_UpperCamelCase : List[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase_ )
_UpperCamelCase : Optional[Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_UpperCamelCase : str = sd.pop(lowercase_ )
_UpperCamelCase : Dict = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_UpperCamelCase : List[str] = sd[key]
# We split QKV in separate Q,K,V
_UpperCamelCase : Tuple = key.replace(".qkv_proj." ,".q_proj." )
_UpperCamelCase : Any = key.replace(".qkv_proj." ,".k_proj." )
_UpperCamelCase : Tuple = key.replace(".qkv_proj." ,".v_proj." )
_UpperCamelCase : Optional[Any] = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = torch.split(lowercase_ ,depth // 3 ,dim=0 )
_UpperCamelCase : int = q
_UpperCamelCase : Optional[Any] = k
_UpperCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ) -> int:
"""simple docstring"""
_UpperCamelCase : List[Any] = load_checkpoint(lowercase_ )
if config is not None:
_UpperCamelCase : Tuple = OPTConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Union[str, Any] = OPTConfig()
_UpperCamelCase : List[Any] = OPTModel(lowercase_ ).half().eval()
model.load_state_dict(lowercase_ )
# Check results
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
lowerCamelCase__ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
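
# Hedged illustration (added, not part of the conversion script): the fused QKV
# split above on a toy tensor. A (3*d, d) weight stored in K, V, Q order is cut
# into three (d, d) blocks along dim 0.
#
#   d = 4
#   fused = torch.arange(3 * d * d, dtype=torch.float32).view(3 * d, d)
#   k, v, q = torch.split(fused, d, dim=0)  # note the K, V, Q order
#   assert k.shape == v.shape == q.shape == (d, d)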
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
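
# Note (added): at import time only `_import_structure` is registered with the
# `_LazyModule`; the heavy submodules above are loaded lazily on first
# attribute access, keeping the package import cheap.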
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
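
# Hedged worked example (added): an outer 7x7 lamina with a centred 3x3 hole
# uses 7*7 - 3*3 = 40 tiles, so it contributes one arrangement to count[40];
# solution() then counts tile budgets t <= t_limit achievable in between 1 and
# n_limit distinct ways.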
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , __a : Optional[Any] , __a : str , __a : Optional[int] ) -> Any:
_UpperCamelCase : Tuple = name
_UpperCamelCase : Any = value
_UpperCamelCase : List[Any] = weight
def __repr__( self : Tuple ) -> List[str]:
return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return self.value
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
return self.name
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
return self.weight
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.value / self.weight
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Optional[int] = []
for i in range(len(lowercase_ ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : str = sorted(lowercase_ ,key=lowercase_ ,reverse=lowercase_ )
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase, _UpperCamelCase : str = 0.0, 0.0
for i in range(len(lowercase_ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
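
# Hedged usage sketch (added; the menu below is illustrative, not from the
# original file): pick items greedily by value under a weight budget of 60.
#
#   food = ["Burger", "Pizza", "Coca Cola", "Rice"]
#   value = [80, 100, 60, 70]
#   weight = [40, 10, 20, 70]
#   foods = build_menu(food, value, weight)
#   chosen, total_value = greedy(foods, 60, Things.get_value)
#   # -> total_value == 180.0 (Pizza then Burger fit; Rice and Coca Cola do not)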
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
super().__init__(__a , __a )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase : Tuple = self._get_next_ind(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
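
# Hedged usage sketch (added): the map behaves like a dict backed by open
# addressing with linear probing and automatic resizing.
#
#   hm: HashMap[str, int] = HashMap()
#   hm["a"] = 1
#   hm["b"] = 2
#   del hm["a"]
#   len(hm)   # -> 1
#   hm["b"]   # -> 2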
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = [1]
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = 0, 0, 0
_UpperCamelCase : str = ugly_nums[ia] * 2
_UpperCamelCase : int = ugly_nums[ia] * 3
_UpperCamelCase : Tuple = ugly_nums[ia] * 5
for _ in range(1 ,lowercase_ ):
_UpperCamelCase : Optional[int] = min(lowercase_ ,lowercase_ ,lowercase_ )
ugly_nums.append(lowercase_ )
if next_num == next_a:
ia += 1
_UpperCamelCase : List[str] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
_UpperCamelCase : Any = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
_UpperCamelCase : str = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(200) = }""")
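
# Hedged note (added): ugly numbers are those whose only prime factors are
# 2, 3 and 5; the three-pointer merge above generates them in order, e.g.
#
#   [ugly_numbers(k) for k in range(1, 11)]  # -> [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]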
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : list[int] ) -> None:
_UpperCamelCase : Tuple = len(__a )
_UpperCamelCase : Dict = [0] * len_array
if len_array > 0:
_UpperCamelCase : Optional[Any] = array[0]
for i in range(1 , __a ):
_UpperCamelCase : Tuple = self.prefix_sum[i - 1] + array[i]
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : int , __a : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int ) -> bool:
_UpperCamelCase : int = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__a )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
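
# Hedged usage sketch (added):
#
#   ps = PrefixSum([1, 2, 3, 4])
#   ps.get_sum(1, 2)     # -> 5  (2 + 3)
#   ps.contains_sum(7)   # -> True  (the subarray [3, 4])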