"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__ ):
super().__init__()
snake_case_ : List[Any] = nn.ModuleList(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = True , ):
for i, (image, scale, controlnet) in enumerate(zip(lowercase__ , lowercase__ , self.nets ) ):
snake_case_ : str = controlnet(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
# merge samples
if i == 0:
snake_case_ : Tuple = down_samples, mid_sample
else:
snake_case_ : int = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase__ , lowercase__ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __UpperCamelCase (self , lowercase__ , lowercase__ = True , lowercase__ = None , lowercase__ = False , lowercase__ = None , ):
snake_case_ : Optional[int] = 0
snake_case_ : List[str] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase__ , is_main_process=lowercase__ , save_function=lowercase__ , safe_serialization=lowercase__ , variant=lowercase__ , )
idx += 1
snake_case_ : Any = model_path_to_save + f'_{idx}'
@classmethod
def __UpperCamelCase (cls , lowercase__ , **lowercase__ ):
snake_case_ : Any = 0
snake_case_ : Union[str, Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
snake_case_ : int = pretrained_model_path
while os.path.isdir(lowercase__ ):
snake_case_ : Any = ControlNetModel.from_pretrained(lowercase__ , **lowercase__ )
controlnets.append(lowercase__ )
idx += 1
snake_case_ : Dict = pretrained_model_path + f'_{idx}'
logger.info(f'{len(lowercase__ )} controlnets loaded from {pretrained_model_path}.' )
if len(lowercase__ ) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(lowercase__ )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(lowercase__ )
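# Usage sketch (added for illustration; not part of the original file). The directory
# below is hypothetical — any path works, since `from_pretrained` probes `<path>`,
# `<path>_1`, `<path>_2`, ... in order, mirroring what `save_pretrained` writes:
#
#   nets = MultiControlNetModel([controlnet_a, controlnet_b])
#   nets.save_pretrained("./mydirectory/controlnet")   # -> controlnet, controlnet_1
#   restored = MultiControlNetModel.from_pretrained("./mydirectory/controlnet")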
"""simple docstring"""
import argparse
import copy
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : List[Any] = {}
with open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case_ : int = []
_list.append([line.split()[1], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case_ : str = []
_list.append([line.split()[0], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ ) as f:
snake_case_ : Optional[Any] = f.read(1 )
snake_case_ : Union[str, Any] = start_node
snake_case_ : Dict = []
snake_case_ : Union[str, Any] = start_node
snake_case_ : Tuple = 0
while visiting not in first_solution:
snake_case_ : int = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution:
snake_case_ : Union[str, Any] = k[1]
snake_case_ : Any = k[0]
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = best_node
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case_ : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = []
for n in solution[1:-1]:
snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ )
for kn in solution[1:-1]:
snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ )
if n == kn:
continue
snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
snake_case_ : int = kn
snake_case_ : Dict = n
snake_case_ : Optional[int] = 0
for k in _tmp[:-1]:
snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case_ : Dict = distance + int(i[1] )
_tmp.append(SCREAMING_SNAKE_CASE__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case_ : Optional[Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
snake_case_ : Dict = 1
snake_case_ : List[Any] = first_solution
snake_case_ : List[Any] = []
snake_case_ : Optional[Any] = distance_of_first_solution
snake_case_ : Dict = solution
while count <= iters:
snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = 0
snake_case_ : List[Any] = neighborhood[index_of_best_solution]
snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1
snake_case_ : List[str] = False
while not found:
snake_case_ : Tuple = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
if best_solution[i] != solution[i]:
snake_case_ : Optional[Any] = best_solution[i]
snake_case_ : int = solution[i]
break
snake_case_ : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case_ : Tuple = True
snake_case_ : Dict = best_solution[:-1]
snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case_ : Tuple = cost
snake_case_ : Union[str, Any] = solution
else:
snake_case_ : str = index_of_best_solution + 1
snake_case_ : Tuple = neighborhood[index_of_best_solution]
if len(SCREAMING_SNAKE_CASE__ ) >= size:
tabu_list.pop(0 )
snake_case_ : List[str] = count + 1
return best_solution_ever, best_cost
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
"""simple docstring"""
snake_case_ : Tuple = generate_neighbours(args.File )
snake_case_ , snake_case_ : Optional[Any] = generate_first_solution(
args.File , SCREAMING_SNAKE_CASE__ )
snake_case_ , snake_case_ : Dict = tabu_search(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , )
print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
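# Illustrative demo (added; not in the original file). It builds a tiny 4-city instance
# in the "node_a node_b distance" format the parser above expects, then runs a short
# search; the instance and the iteration/tabu-size values are arbitrary.
def _demo_tabu_search():
    import tempfile

    edges = "a b 20\na c 18\na d 22\nb c 10\nb d 11\nc d 23\n"
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write(edges)
    neighbours = generate_neighbours(tmp.name)
    first_solution, distance = generate_first_solution(tmp.name, neighbours)
    best_solution, best_cost = tabu_search(first_solution, distance, neighbours, 10, 4)
    print(f"Best tour: {best_solution} (cost {best_cost})")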
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
"""simple docstring"""
snake_case_ : Optional[Any] = tesseract_config if tesseract_config is not None else """"""
# apply OCR
snake_case_ : Optional[int] = to_pil_image(SCREAMING_SNAKE_CASE__ )
snake_case_ : Union[str, Any] = pil_image.size
snake_case_ : Optional[Any] = pytesseract.image_to_data(SCREAMING_SNAKE_CASE__ , lang=SCREAMING_SNAKE_CASE__ , output_type="""dict""" , config=SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
snake_case_ : List[Any] = [idx for idx, word in enumerate(SCREAMING_SNAKE_CASE__ ) if not word.strip()]
snake_case_ : Any = [word for idx, word in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
snake_case_ : Optional[int] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
snake_case_ : List[Any] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
snake_case_ : Any = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
snake_case_ : List[Any] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
snake_case_ : List[Any] = []
for x, y, w, h in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Tuple = [x, y, x + w, y + h]
actual_boxes.append(SCREAMING_SNAKE_CASE__ )
# finally, normalize the bounding boxes
snake_case_ : Union[str, Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = ["""pixel_values"""]
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = None , lowercase__ = "" , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24}
snake_case_ : Any = get_size_dict(lowercase__ )
snake_case_ : Optional[int] = do_resize
snake_case_ : Any = size
snake_case_ : Optional[int] = resample
snake_case_ : Optional[int] = apply_ocr
snake_case_ : List[Any] = ocr_lang
snake_case_ : Tuple = tesseract_config
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = None , **lowercase__ , ):
snake_case_ : int = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
snake_case_ : Dict = (size["""height"""], size["""width"""])
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : List[str] = size if size is not None else self.size
snake_case_ : List[str] = get_size_dict(lowercase__ )
snake_case_ : str = resample if resample is not None else self.resample
snake_case_ : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
snake_case_ : str = ocr_lang if ocr_lang is not None else self.ocr_lang
snake_case_ : str = tesseract_config if tesseract_config is not None else self.tesseract_config
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : str = [to_numpy_array(lowercase__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
snake_case_ : Tuple = []
snake_case_ : Tuple = []
for image in images:
snake_case_ : Optional[int] = apply_tesseract(lowercase__ , lowercase__ , lowercase__ )
words_batch.append(lowercase__ )
boxes_batch.append(lowercase__ )
if do_resize:
snake_case_ : Union[str, Any] = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
snake_case_ : Any = [flip_channel_order(lowercase__ ) for image in images]
snake_case_ : int = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : str = BatchFeature(data={"""pixel_values""": images} , tensor_type=lowercase__ )
if apply_ocr:
snake_case_ : List[str] = words_batch
snake_case_ : Tuple = boxes_batch
return data
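# Usage sketch (added; the file path is illustrative). Calling the processor runs OCR
# when pytesseract is installed and returns pixel values plus the recognized words and
# their 0-1000-normalized boxes:
#
#   processor = LayoutLMv2ImageProcessor()
#   encoding = processor(PIL.Image.open("document.png"), return_tensors="np")
#   encoding.keys()  # dict_keys(['pixel_values', 'words', 'boxes'])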
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """rag"""
_A : Optional[Any] = True
def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
snake_case_ : List[str] = kwargs.pop("""generator""" )
snake_case_ : List[str] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : int = reduce_loss
snake_case_ : Optional[int] = label_smoothing
snake_case_ : Dict = exclude_bos_score
snake_case_ : Union[str, Any] = do_marginalize
snake_case_ : Union[str, Any] = title_sep
snake_case_ : int = doc_sep
snake_case_ : int = n_docs
snake_case_ : List[str] = max_combined_length
snake_case_ : Tuple = dataset
snake_case_ : int = dataset_split
snake_case_ : str = index_name
snake_case_ : List[str] = retrieval_vector_size
snake_case_ : Dict = retrieval_batch_size
snake_case_ : str = passages_path
snake_case_ : Union[str, Any] = index_path
snake_case_ : Tuple = use_dummy_dataset
snake_case_ : Dict = output_retrieved
snake_case_ : str = do_deduplication
snake_case_ : Any = use_cache
if self.forced_eos_token_id is None:
snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )
@classmethod
def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.question_encoder.to_dict()
snake_case_ : Dict = self.generator.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
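# Construction sketch (added; the sub-model choices are illustrative). A RagConfig is
# normally built from the two sub-model configs; the classmethod above packs their dicts
# into the `question_encoder`/`generator` kwargs that the constructor asserts on:
#
#   from transformers import BartConfig, DPRConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
#   )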
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = SwinvaConfig()
snake_case_ : Union[str, Any] = swinva_name.split("""_""" )
snake_case_ : List[Any] = name_split[1]
if "to" in name_split[3]:
snake_case_ : Union[str, Any] = int(name_split[3][-3:] )
else:
snake_case_ : Optional[int] = int(name_split[3] )
if "to" in name_split[2]:
snake_case_ : Dict = int(name_split[2][-2:] )
else:
snake_case_ : int = int(name_split[2][6:] )
if model_size == "tiny":
snake_case_ : str = 9_6
snake_case_ : Dict = (2, 2, 6, 2)
snake_case_ : int = (3, 6, 1_2, 2_4)
elif model_size == "small":
snake_case_ : Tuple = 9_6
snake_case_ : Optional[Any] = (2, 2, 1_8, 2)
snake_case_ : Optional[int] = (3, 6, 1_2, 2_4)
elif model_size == "base":
snake_case_ : Dict = 1_2_8
snake_case_ : str = (2, 2, 1_8, 2)
snake_case_ : List[Any] = (4, 8, 1_6, 3_2)
else:
snake_case_ : Tuple = 1_9_2
snake_case_ : Tuple = (2, 2, 1_8, 2)
snake_case_ : Optional[Any] = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
snake_case_ : str = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
snake_case_ : str = 2_1_8_4_1
snake_case_ : str = """huggingface/label-files"""
snake_case_ : List[str] = """imagenet-22k-id2label.json"""
snake_case_ : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ : Optional[int] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ : int = idalabel
snake_case_ : Optional[Any] = {v: k for k, v in idalabel.items()}
else:
snake_case_ : int = 1_0_0_0
snake_case_ : List[Any] = """huggingface/label-files"""
snake_case_ : List[Any] = """imagenet-1k-id2label.json"""
snake_case_ : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ : Union[str, Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ : Dict = idalabel
snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()}
snake_case_ : Dict = img_size
snake_case_ : int = num_classes
snake_case_ : str = embed_dim
snake_case_ : Dict = depths
snake_case_ : int = num_heads
snake_case_ : Optional[Any] = window_size
return config
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if "patch_embed.proj" in name:
snake_case_ : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case_ : Union[str, Any] = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
snake_case_ : List[str] = """encoder.""" + name
if "attn.proj" in name:
snake_case_ : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
snake_case_ : Dict = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
snake_case_ : List[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case_ : Dict = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case_ : int = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case_ : List[Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
snake_case_ : Optional[Any] = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
snake_case_ : List[Any] = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
snake_case_ : int = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
snake_case_ : str = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if name == "norm.weight":
snake_case_ : Tuple = """layernorm.weight"""
if name == "norm.bias":
snake_case_ : int = """layernorm.bias"""
if "head" in name:
snake_case_ : Union[str, Any] = name.replace("""head""" , """classifier""" )
else:
snake_case_ : Optional[Any] = """swinv2.""" + name
return name
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
snake_case_ : Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "mask" in key:
continue
elif "qkv" in key:
snake_case_ : List[Any] = key.split(""".""" )
snake_case_ : str = int(key_split[1] )
snake_case_ : Optional[Any] = int(key_split[3] )
snake_case_ : List[Any] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case_ : Any = val[:dim, :]
snake_case_ : Optional[Any] = val[dim : dim * 2, :]
snake_case_ : int = val[-dim:, :]
else:
snake_case_ : List[str] = val[:dim]
snake_case_ : Tuple = val[
dim : dim * 2
]
snake_case_ : Union[str, Any] = val[-dim:]
else:
snake_case_ : int = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Optional[Any] = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ )
timm_model.eval()
snake_case_ : Any = get_swinva_config(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = SwinvaForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case_ : Any = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) )
snake_case_ : Dict = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
snake_case_ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
snake_case_ : Optional[int] = timm_model(inputs["""pixel_values"""] )
snake_case_ : List[str] = model(**SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
print(f'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a_ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
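# Invocation sketch (added; the script filename is assumed, the model name is the
# argparse default above, and the output path is illustrative):
#
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-hf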
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Tuple = backbone_config.get("""model_type""" )
snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
snake_case_ : List[Any] = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = initializer_range
snake_case_ : str = pool_scales
snake_case_ : Dict = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : List[str] = auxiliary_in_channels
snake_case_ : Optional[Any] = auxiliary_channels
snake_case_ : Any = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : List[str] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : Any = self.__class__.model_type
return output
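# Construction sketch (added; the backbone choice is illustrative). `backbone_config`
# may be a config object or a plain dict carrying a `model_type` key, which the
# constructor resolves through CONFIG_MAPPING:
#
#   config = UperNetConfig(
#       backbone_config={"model_type": "convnext", "out_features": ["stage1", "stage2", "stage3", "stage4"]}
#   )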
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : int = fname.split(os.path.sep )[-1]
return re.search(R"""^(.*)_\d+\.jpg$""" , SCREAMING_SNAKE_CASE__ ).groups()[0]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__=None , lowercase__=None ):
snake_case_ : Union[str, Any] = file_names
snake_case_ : Tuple = image_transform
snake_case_ : Union[str, Any] = label_to_id
def __len__(self ):
return len(self.file_names )
def __getitem__(self , lowercase__ ):
snake_case_ : Optional[Any] = self.file_names[idx]
snake_case_ : Dict = PIL.Image.open(lowercase__ )
snake_case_ : Union[str, Any] = raw_image.convert("""RGB""" )
if self.image_transform is not None:
snake_case_ : Union[str, Any] = self.image_transform(lowercase__ )
snake_case_ : Dict = extract_label(lowercase__ )
if self.label_to_id is not None:
snake_case_ : Union[str, Any] = self.label_to_id[label]
return {"image": image, "label": label}
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
if args.with_tracking:
snake_case_ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
snake_case_ : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : Tuple = config["""lr"""]
snake_case_ : Any = int(config["""num_epochs"""] )
snake_case_ : str = int(config["""seed"""] )
snake_case_ : Any = int(config["""batch_size"""] )
snake_case_ : Any = config["""image_size"""]
if not isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
snake_case_ : Dict = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , """isdigit""" ):
if args.checkpointing_steps == "epoch":
snake_case_ : List[str] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
snake_case_ : Any = int(args.checkpointing_steps )
else:
raise ValueError(
f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
else:
snake_case_ : Dict = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
snake_case_ : Any = os.path.split(SCREAMING_SNAKE_CASE__ )[-1].split(""".""" )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Grab all the image filenames
snake_case_ : Any = [os.path.join(args.data_dir , SCREAMING_SNAKE_CASE__ ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )]
# Build the label correspondences
snake_case_ : int = [extract_label(SCREAMING_SNAKE_CASE__ ) for fname in file_names]
snake_case_ : List[str] = list(set(SCREAMING_SNAKE_CASE__ ) )
id_to_label.sort()
snake_case_ : str = {lbl: i for i, lbl in enumerate(SCREAMING_SNAKE_CASE__ )}
# Set the seed before splitting the data.
np.random.seed(SCREAMING_SNAKE_CASE__ )
torch.manual_seed(SCREAMING_SNAKE_CASE__ )
torch.cuda.manual_seed_all(SCREAMING_SNAKE_CASE__ )
# Split our filenames between train and validation
snake_case_ : Union[str, Any] = np.random.permutation(len(SCREAMING_SNAKE_CASE__ ) )
snake_case_ : Dict = int(0.8 * len(SCREAMING_SNAKE_CASE__ ) )
snake_case_ : Union[str, Any] = random_perm[:cut]
snake_case_ : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
snake_case_ : Any = Compose([RandomResizedCrop(SCREAMING_SNAKE_CASE__ , scale=(0.5, 1.0) ), ToTensor()] )
snake_case_ : Tuple = PetsDataset(
[file_names[i] for i in train_split] , image_transform=SCREAMING_SNAKE_CASE__ , label_to_id=SCREAMING_SNAKE_CASE__ )
# For evaluation, we use a deterministic Resize
snake_case_ : Tuple = Compose([Resize(SCREAMING_SNAKE_CASE__ ), ToTensor()] )
snake_case_ : str = PetsDataset([file_names[i] for i in eval_split] , image_transform=SCREAMING_SNAKE_CASE__ , label_to_id=SCREAMING_SNAKE_CASE__ )
# Instantiate dataloaders.
snake_case_ : int = DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , num_workers=4 )
snake_case_ : Optional[int] = DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ : Tuple = create_model("""resnet50d""" , pretrained=SCREAMING_SNAKE_CASE__ , num_classes=len(SCREAMING_SNAKE_CASE__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Tuple = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
snake_case_ : Any = False
for param in model.get_classifier().parameters():
snake_case_ : List[str] = True
# We normalize the batches of images to be a bit faster.
snake_case_ : Dict = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device )
snake_case_ : Any = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
snake_case_ : str = torch.optim.Adam(params=model.parameters() , lr=lr / 2_5 )
# Instantiate learning rate scheduler
snake_case_ : Dict = OneCycleLR(optimizer=SCREAMING_SNAKE_CASE__ , max_lr=SCREAMING_SNAKE_CASE__ , epochs=SCREAMING_SNAKE_CASE__ , steps_per_epoch=len(SCREAMING_SNAKE_CASE__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ : Any = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
snake_case_ : Optional[Any] = 0
# We also need to keep track of the starting epoch so files are named properly
snake_case_ : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}' )
accelerator.load_state(args.resume_from_checkpoint )
snake_case_ : Tuple = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
snake_case_ : List[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
snake_case_ : int = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
snake_case_ : Union[str, Any] = os.path.splitext(SCREAMING_SNAKE_CASE__ )[0]
if "epoch" in training_difference:
snake_case_ : Tuple = int(training_difference.replace("""epoch_""" , """""" ) ) + 1
snake_case_ : str = None
else:
snake_case_ : Tuple = int(training_difference.replace("""step_""" , """""" ) )
snake_case_ : Optional[Any] = resume_step // len(SCREAMING_SNAKE_CASE__ )
resume_step -= starting_epoch * len(SCREAMING_SNAKE_CASE__ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
model.train()
if args.with_tracking:
snake_case_ : Dict = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
snake_case_ : Tuple = accelerator.skip_first_batches(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
snake_case_ : Dict = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
snake_case_ : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
snake_case_ : List[str] = (batch["""image"""] - mean) / std
snake_case_ : int = model(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = torch.nn.functional.cross_entropy(SCREAMING_SNAKE_CASE__ , batch["""label"""] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(SCREAMING_SNAKE_CASE__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Dict = f'step_{overall_step}'
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
snake_case_ : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
model.eval()
snake_case_ : List[str] = 0
snake_case_ : Any = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
snake_case_ : List[str] = {k: v.to(accelerator.device ) for k, v in batch.items()}
snake_case_ : Optional[Any] = (batch["""image"""] - mean) / std
with torch.no_grad():
snake_case_ : int = model(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = outputs.argmax(dim=-1 )
snake_case_ : Optional[int] = accelerator.gather_for_metrics((predictions, batch["""label"""]) )
snake_case_ : Dict = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
snake_case_ : Tuple = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}: {1_0_0 * eval_metric:.2f}' )
if args.with_tracking:
accelerator.log(
{
"""accuracy""": 1_0_0 * eval_metric,
"""train_loss""": total_loss.item() / len(SCREAMING_SNAKE_CASE__ ),
"""epoch""": epoch,
} , step=SCREAMING_SNAKE_CASE__ , )
if checkpointing_steps == "epoch":
snake_case_ : Dict = f'epoch_{epoch}'
if args.output_dir is not None:
snake_case_ : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
if args.with_tracking:
accelerator.end_training()
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument("""--data_dir""" , required=SCREAMING_SNAKE_CASE__ , help="""The data folder on disk.""" )
parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" )
parser.add_argument(
"""--mixed_precision""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--checkpointing_steps""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , )
parser.add_argument(
"""--output_dir""" , type=SCREAMING_SNAKE_CASE__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=SCREAMING_SNAKE_CASE__ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
snake_case_ : Tuple = parser.parse_args()
snake_case_ : Dict = {"""lr""": 3E-2, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 6_4, """image_size""": 2_2_4}
training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
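# Launch sketch (added; the script filename and paths are illustrative). The script is
# meant to be started through the Accelerate CLI, which handles device placement and,
# optionally, multi-GPU or TPU execution:
#
#   accelerate launch cv_example.py --data_dir ./images \
#       --checkpointing_steps epoch --output_dir ./checkpoints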
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a_ = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__=-1 ):
# in NER datasets, the last column is usually reserved for NER label
snake_case_ : Union[str, Any] = label_idx
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[str] = mode.value
snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : Any = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
snake_case_ : str = []
snake_case_ : List[Any] = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
snake_case_ : Optional[Any] = []
snake_case_ : int = []
else:
snake_case_ : Optional[Any] = line.split(""" """ )
words.append(splits[0] )
if len(lowercase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : str = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(lowercase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(lowercase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Dict = f.read().splitlines()
if "O" not in labels:
snake_case_ : List[Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Any = f.read().splitlines()
if "O" not in labels:
snake_case_ : Tuple = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[Any] = mode.value
snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : str = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(lowercase__ ):
snake_case_ : Tuple = []
snake_case_ : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(lowercase__ ) == len(lowercase__ )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = 0
for sentence in parse_incr(lowercase__ ):
snake_case_ : int = preds_list[example_id]
snake_case_ : Dict = """"""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowercase__ )
example_id += 1
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
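# Usage sketch (added; the data directory is illustrative). Each task object reads
# `{mode}.txt` from a data directory and yields `InputExample`s:
#
#   task = NER()
#   labels = task.get_labels(path=None)                 # default CoNLL-2003 NER tag set
#   examples = task.read_examples_from_file("./data", Split.train)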
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
a_ = 5
a_ = 10
@require_sentencepiece
@require_tokenizers
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : str = SpeechaTextTokenizer
_A : Any = False
_A : Optional[Any] = True
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : List[str] = sp.SentencePieceProcessor()
spm_model.Load(lowercase__ )
snake_case_ : Optional[int] = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowercase__ ) )]
snake_case_ : List[Any] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : List[str] = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
snake_case_ : Optional[Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase (self ):
snake_case_ : Dict = """<pad>"""
snake_case_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(lowercase__ ) , 10_01 )
def __UpperCamelCase (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def __UpperCamelCase (self ):
snake_case_ : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
snake_case_ : str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) , [2_89, 50, 14, 1_74, 3_86] , )
snake_case_ : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
snake_case_ : Optional[int] = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(lowercase__ , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
snake_case_ : Dict = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def __UpperCamelCase (self ):
# fmt: off
snake_case_ : List[str] = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
EXPECTED_ENCODING = snake_case_  # noqa: E501


@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with five random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Screen against small primes first, then fall back to Miller-Rabin."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1_0_2_4) -> int:
    """Generate a random prime of roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
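
# A minimal sketch of the decomposition Miller-Rabin relies on: every candidate
# satisfies num - 1 == 2**t * s with s odd, which is exactly what the loop at
# the top of `rabin_miller` computes.
def decompose(num):
    """Return (t, s) such that num - 1 == 2**t * s with s odd."""
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    return t, s


t_demo, s_demo = decompose(97)
assert 97 - 1 == 2**t_demo * s_demo and s_demo % 2 == 1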
| 48
| 0
|
"""simple docstring"""
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 5_0_0:
            break
    return t_num
if __name__ == "__main__":
print(solution())
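
# A small check relating the running sum in `solution` to the closed form for
# triangular numbers, T_n = n * (n + 1) // 2 (pure arithmetic, no assumptions):
total = 1
for n in range(2, 20):
    total += n
    assert total == n * (n + 1) // 2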
| 712
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = """deberta-v2"""
def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Optional[int] = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = relative_attention
snake_case_ : Dict = max_relative_positions
snake_case_ : Optional[int] = pad_token_id
snake_case_ : List[str] = position_biased_input
# Backwards compatibility
if type(lowercase__ ) == str:
snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )]
snake_case_ : Optional[int] = pos_att_type
snake_case_ : List[str] = vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ )
snake_case_ : List[str] = pooler_dropout
snake_case_ : int = pooler_hidden_act
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@property
def __UpperCamelCase (self ):
if self.task == "multiple-choice":
snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : int = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def __UpperCamelCase (self ):
return 12
def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ):
snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
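
# Minimal usage sketch (assumption: the released `transformers` package, where
# this configuration is exposed as `DebertaV2Config`): configs are plain
# containers that round-trip through dictionaries without loading any weights.
if __name__ == "__main__":
    from transformers import DebertaV2Config

    demo_cfg = DebertaV2Config(num_hidden_layers=2, hidden_size=128, num_attention_heads=2)
    assert DebertaV2Config.from_dict(demo_cfg.to_dict()).hidden_size == 128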
| 48
| 0
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Any = """xlm-prophetnet"""
_A : Union[str, Any] = ["""past_key_values"""]
_A : Optional[int] = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
def __init__(self , lowercase__ = 0.1 , lowercase__ = "gelu" , lowercase__ = 3_05_22 , lowercase__ = 10_24 , lowercase__ = 40_96 , lowercase__ = 12 , lowercase__ = 16 , lowercase__ = 40_96 , lowercase__ = 12 , lowercase__ = 16 , lowercase__ = 0.1 , lowercase__ = 0.1 , lowercase__ = 5_12 , lowercase__ = 0.02 , lowercase__ = True , lowercase__ = True , lowercase__ = 0 , lowercase__ = 2 , lowercase__ = 32 , lowercase__ = 1_28 , lowercase__ = False , lowercase__ = 0.0 , lowercase__ = True , lowercase__ = 0 , lowercase__ = 1 , lowercase__ = 2 , **lowercase__ , ):
snake_case_ : Dict = vocab_size
snake_case_ : Optional[int] = hidden_size
snake_case_ : Tuple = encoder_ffn_dim
snake_case_ : Optional[Any] = num_encoder_layers
snake_case_ : Dict = num_encoder_attention_heads
snake_case_ : Dict = decoder_ffn_dim
snake_case_ : Union[str, Any] = num_decoder_layers
snake_case_ : Tuple = num_decoder_attention_heads
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : str = init_std # Normal(0, this parameter)
snake_case_ : Optional[Any] = activation_function
# parameters for xlmprophetnet
snake_case_ : Dict = ngram
snake_case_ : List[Any] = num_buckets
snake_case_ : Union[str, Any] = relative_max_distance
snake_case_ : Optional[int] = disable_ngram_loss
snake_case_ : Optional[int] = eps
# 3 Types of Dropout
snake_case_ : Optional[Any] = attention_dropout
snake_case_ : Optional[int] = activation_dropout
snake_case_ : Any = dropout
snake_case_ : Tuple = use_cache
super().__init__(
pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , add_cross_attention=lowercase__ , decoder_start_token_id=lowercase__ , **lowercase__ , )
@property
def __UpperCamelCase (self ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __UpperCamelCase (self , lowercase__ ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 713
|
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    >>> different_signs(-1, -1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
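
# Why the XOR trick works (a sketch): in two's complement the sign lives in the
# most significant bit, so num1 ^ num2 is negative exactly when the signs differ.
for x, y in [(7, -3), (-7, 3), (7, 3), (-7, -3), (0, -1)]:
    assert ((x ^ y) < 0) == ((x < 0) != (y < 0))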
| 48
| 0
|
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift the larger elements one slot right, then insert `val`
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
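
# Quick property check for `binary_insertion_sort` (runnable as-is): the result
# must agree with the built-in `sorted` on an arbitrary sample.
_sample = [5, 1, 4, 1, 5, 9, 2, 6]
assert binary_insertion_sort(list(_sample)) == sorted(_sample)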
| 714
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
a_ = '''>>zh<<'''
a_ = '''Helsinki-NLP/'''
if is_torch_available():
a_ = '''pt'''
elif is_tf_available():
a_ = '''tf'''
else:
a_ = '''jax'''
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : str = MarianTokenizer
_A : List[str] = False
_A : List[str] = True
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : Any = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase (self , **lowercase__ ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = """</s>"""
snake_case_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def __UpperCamelCase (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
snake_case_ : Tuple = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : List[str] = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __UpperCamelCase (self ):
# fmt: off
snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
snake_case_ : Dict = """Tämä on testi"""
snake_case_ : List[Any] = """This is a test"""
snake_case_ : Optional[int] = [76, 7, 20_47, 2]
snake_case_ : List[str] = [69, 12, 11, 9_40, 2]
snake_case_ : Any = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
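
# Usage sketch (assumption: network access to the "Helsinki-NLP/opus-mt-en-de"
# checkpoint used above): Marian checkpoints ship separate source and target
# SentencePiece models, so target text goes through the `text_target` argument,
# mirroring the two-vocab test above.
if __name__ == "__main__":
    mar_tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    enc = mar_tok(["I am a small frog"], text_target=["Ich bin ein kleiner Frosch"])
    assert enc.input_ids[0][-1] == mar_tok.eos_token_id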
| 48
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[Any] = """trocr"""
_A : Union[str, Any] = ["""past_key_values"""]
_A : str = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__(self , lowercase__=5_02_65 , lowercase__=10_24 , lowercase__=12 , lowercase__=16 , lowercase__=40_96 , lowercase__="gelu" , lowercase__=5_12 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=2 , lowercase__=0.02 , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=True , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : int = d_model
snake_case_ : Any = decoder_layers
snake_case_ : str = decoder_attention_heads
snake_case_ : Optional[Any] = decoder_ffn_dim
snake_case_ : str = activation_function
snake_case_ : Dict = max_position_embeddings
snake_case_ : Dict = dropout
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Dict = activation_dropout
snake_case_ : Tuple = init_std
snake_case_ : Optional[int] = decoder_layerdrop
snake_case_ : Optional[int] = use_cache
snake_case_ : Union[str, Any] = scale_embedding
snake_case_ : Optional[Any] = use_learned_position_embeddings
snake_case_ : Dict = layernorm_embedding
super().__init__(
pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , **lowercase__ , )
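
# Sketch (assumption: the public class name is `TrOCRConfig`): the
# `attribute_map` above lets generic names such as `hidden_size` and
# `num_hidden_layers` transparently alias the decoder-specific fields.
if __name__ == "__main__":
    from transformers import TrOCRConfig

    demo_cfg = TrOCRConfig(d_model=256, decoder_layers=2, decoder_attention_heads=4)
    assert demo_cfg.hidden_size == 256 and demo_cfg.num_hidden_layers == 2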
| 715
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_A : ClassVar[Features] = Features({"""audio""": Audio()})
_A : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_A : str = "audio"
_A : str = "transcription"
def __UpperCamelCase (self , lowercase__ ):
if self.audio_column not in features:
raise ValueError(f'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , lowercase__ ):
raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
snake_case_ : Optional[int] = copy.deepcopy(self )
snake_case_ : Tuple = self.input_schema.copy()
snake_case_ : List[str] = features[self.audio_column]
snake_case_ : Any = input_schema
return task_template
@property
def __UpperCamelCase (self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
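
# Sketch of the schema this task template aligns against, built from the same
# `datasets` primitives imported above:
_asr_features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
assert isinstance(_asr_features["audio"], Audio)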
| 48
| 0
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __lowercase ( torch.nn.Module):
"""simple docstring"""
def __init__(self , lowercase__="sayef/fsner-bert-base-uncased" ):
super(lowercase__ , self ).__init__()
snake_case_ : Optional[Any] = AutoModel.from_pretrained(lowercase__ , return_dict=lowercase__ )
snake_case_ : int = torch.nn.CosineSimilarity(3 , 1e-08 )
snake_case_ : Optional[int] = torch.nn.Softmax(dim=1 )
def __UpperCamelCase (self , **lowercase__ ):
return self.bert(**lowercase__ ).last_hidden_state
def __UpperCamelCase (self , lowercase__ ):
return token_embeddings.sum(2 , keepdim=lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__=1 ):
return self.softmax(T * self.cos(lowercase__ , lowercase__ ) )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : Tuple = W_supports["""sizes"""].tolist()
snake_case_ : Optional[Any] = W_supports["""start_token_id"""].item()
snake_case_ : Dict = W_supports["""end_token_id"""].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
snake_case_ : int = self.BERT(**lowercase__ )
snake_case_ : Union[str, Any] = self.BERT(**lowercase__ )
snake_case_ : Union[str, Any] = None
snake_case_ : List[Any] = None
snake_case_ : Optional[int] = W_supports["""input_ids"""] == start_token_id
snake_case_ : List[Any] = W_supports["""input_ids"""] == end_token_id
for i, size in enumerate(lowercase__ ):
if i == 0:
snake_case_ : Union[str, Any] = 0
else:
snake_case_ : List[Any] = support_sizes[i - 1]
snake_case_ : Dict = S[s : s + size][start_token_masks[s : s + size]]
snake_case_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
snake_case_ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
snake_case_ : str = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
snake_case_ : Union[str, Any] = torch.vstack((p_starts, p_start) )
snake_case_ : str = torch.vstack((p_ends, p_end) )
else:
snake_case_ : str = p_start
snake_case_ : List[str] = p_end
return p_starts, p_ends
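
# Standalone sketch of the temperature-scaled similarity used above (pure
# torch, no checkpoint required): cosine similarities are pushed through a
# softmax so each query distributes probability mass over support tokens.
_q = torch.randn(2, 5, 8)
_s = torch.randn(2, 5, 8)
_scores = torch.nn.Softmax(dim=1)(1.0 * torch.nn.CosineSimilarity(dim=-1)(_q, _s))
assert torch.allclose(_scores.sum(dim=1), torch.ones(2))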
| 716
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = ["""pixel_values"""]
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : Union[str, Any] = do_resize
snake_case_ : List[str] = size
snake_case_ : str = crop_pct
snake_case_ : str = resample
snake_case_ : Optional[Any] = do_center_crop
snake_case_ : Dict = crop_size
snake_case_ : int = do_rescale
snake_case_ : Optional[int] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
snake_case_ : Dict = int(size["""height"""] / crop_pct )
else:
snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
else:
if "shortest_edge" in size:
snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
elif "height" in size and "width" in size:
snake_case_ : int = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
snake_case_ : int = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ : List[Any] = resample if resample is not None else self.resample
snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : int = image_std if image_std is not None else self.image_std
snake_case_ : List[Any] = size if size is not None else self.size
snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
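
# Sketch of the crop-percentage geometry implemented in `resize` above: the
# image is first scaled up so that a subsequent center crop of `crop_size`
# keeps roughly `crop_pct` of the shorter side.
_crop_size, _crop_pct = 224, 0.9
_shortest_edge = int(_crop_size / _crop_pct)  # resize target before the crop
assert _shortest_edge == 248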
| 48
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
        [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`):
        Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`):
        The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
        `"compressed"`.
    index_path (`str`, *optional*):
        The path to the serialized faiss index on disk.
    passages_path (`str`, *optional*):
        A path to text passages compatible with the faiss index. Required if using
        [`~models.rag.retrieval_rag.LegacyIndex`].
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
        Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """rag"""
_A : Optional[Any] = True
def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
snake_case_ : List[str] = kwargs.pop("""generator""" )
snake_case_ : List[str] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : int = reduce_loss
snake_case_ : Optional[int] = label_smoothing
snake_case_ : Dict = exclude_bos_score
snake_case_ : Union[str, Any] = do_marginalize
snake_case_ : Union[str, Any] = title_sep
snake_case_ : int = doc_sep
snake_case_ : int = n_docs
snake_case_ : List[str] = max_combined_length
snake_case_ : Tuple = dataset
snake_case_ : int = dataset_split
snake_case_ : str = index_name
snake_case_ : List[str] = retrieval_vector_size
snake_case_ : Dict = retrieval_batch_size
snake_case_ : str = passages_path
snake_case_ : Union[str, Any] = index_path
snake_case_ : Tuple = use_dummy_dataset
snake_case_ : Dict = output_retrieved
snake_case_ : str = do_deduplication
snake_case_ : Any = use_cache
if self.forced_eos_token_id is None:
snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )
@classmethod
def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.question_encoder.to_dict()
snake_case_ : Dict = self.generator.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
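
# Usage sketch (assumption: the released `transformers` API, where this class
# is `RagConfig`): a RAG config nests a question-encoder config and a generator
# config, which the classmethod above composes.
if __name__ == "__main__":
    from transformers import BartConfig, DPRConfig, RagConfig

    demo_cfg = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig(), n_docs=5)
    assert demo_cfg.generator.model_type == "bart"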
| 717
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = VOCAB_FILES_NAMES
_A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_A : str = ["""input_ids""", """attention_mask"""]
_A : Tuple = MBartTokenizer
_A : List[int] = []
_A : List[int] = []
def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
super().__init__(
vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
snake_case_ : Dict = vocab_file
snake_case_ : Optional[int] = False if not self.vocab_file else True
snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case_ : Any = {
lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX"""
snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
snake_case_ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase (self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : List[Any] = [self.sep_token_id]
snake_case_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ : int = src_lang
snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Union[str, Any] = tgt_lang_id
return inputs
def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
snake_case_ : List[str] = src_lang
snake_case_ : int = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def __UpperCamelCase (self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase (self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Tuple = []
snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Optional[int] = []
snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
snake_case_ : List[str] = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
copyfile(self.vocab_file , lowercase__ )
return (out_vocab_file,)
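
# Usage sketch (assumption: network access to "facebook/mbart-large-en-ro"):
# `set_src_lang_special_tokens` above installs an empty prefix and an
# [</s>, src_lang_code] suffix, so encoded inputs end with the language code.
if __name__ == "__main__":
    from transformers import MBartTokenizerFast

    demo_tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    demo_ids = demo_tok("UN Chief says there is no military solution in Syria").input_ids
    assert demo_ids[-1] == demo_tok.lang_code_to_id["en_XX"]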
| 48
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[PIL.Image.Image, np.ndarray]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
prior=lowercase__ , image_encoder=lowercase__ , image_processor=lowercase__ , scheduler=lowercase__ , renderer=lowercase__ , )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
if latents is None:
snake_case_ : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
snake_case_ : Tuple = latents.to(lowercase__ )
snake_case_ : Any = latents * scheduler.init_noise_sigma
return latents
def __UpperCamelCase (self , lowercase__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
snake_case_ : List[Any] = torch.device(f'cuda:{gpu_id}' )
snake_case_ : List[str] = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__ )
@property
def __UpperCamelCase (self ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
if isinstance(lowercase__ , lowercase__ ) and isinstance(image[0] , torch.Tensor ):
snake_case_ : Tuple = torch.cat(lowercase__ , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase__ , axis=0 )
if not isinstance(lowercase__ , torch.Tensor ):
snake_case_ : List[str] = self.image_processor(lowercase__ , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
snake_case_ : List[Any] = image.to(dtype=self.image_encoder.dtype , device=lowercase__ )
snake_case_ : int = self.image_encoder(lowercase__ )["""last_hidden_state"""]
snake_case_ : Any = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
snake_case_ : List[str] = image_embeds.repeat_interleave(lowercase__ , dim=0 )
if do_classifier_free_guidance:
snake_case_ : Any = torch.zeros_like(lowercase__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : List[str] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase__ )
def __call__(self , lowercase__ , lowercase__ = 1 , lowercase__ = 25 , lowercase__ = None , lowercase__ = None , lowercase__ = 4.0 , lowercase__ = 64 , lowercase__ = "pil" , lowercase__ = True , ):
if isinstance(lowercase__ , PIL.Image.Image ):
snake_case_ : Dict = 1
elif isinstance(lowercase__ , torch.Tensor ):
snake_case_ : str = image.shape[0]
elif isinstance(lowercase__ , lowercase__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
snake_case_ : Union[str, Any] = len(lowercase__ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase__ )}' )
snake_case_ : Tuple = self._execution_device
snake_case_ : int = batch_size * num_images_per_prompt
snake_case_ : Optional[int] = guidance_scale > 1.0
snake_case_ : List[str] = self._encode_image(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# prior
self.scheduler.set_timesteps(lowercase__ , device=lowercase__ )
snake_case_ : Any = self.scheduler.timesteps
snake_case_ : int = self.prior.config.num_embeddings
snake_case_ : Optional[int] = self.prior.config.embedding_dim
snake_case_ : Tuple = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
snake_case_ : Optional[int] = latents.reshape(latents.shape[0] , lowercase__ , lowercase__ )
for i, t in enumerate(self.progress_bar(lowercase__ ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ : Any = self.scheduler.scale_model_input(lowercase__ , lowercase__ )
snake_case_ : List[Any] = self.prior(
lowercase__ , timestep=lowercase__ , proj_embedding=lowercase__ , ).predicted_image_embedding
# remove the variance
snake_case_ : Any = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
snake_case_ : List[Any] = noise_pred.chunk(2 )
snake_case_ : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
snake_case_ : int = self.scheduler.step(
lowercase__ , timestep=lowercase__ , sample=lowercase__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase__ )
snake_case_ : List[str] = []
for i, latent in enumerate(lowercase__ ):
snake_case_ : int = self.renderer.decode(
latent[None, :] , lowercase__ , size=lowercase__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(lowercase__ )
snake_case_ : Any = torch.stack(lowercase__ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
snake_case_ : Optional[Any] = images.cpu().numpy()
if output_type == "pil":
snake_case_ : List[str] = [self.numpy_to_pil(lowercase__ ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase__ )
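
# Standalone sketch of the classifier-free guidance mix used in the denoising
# loop above (pure torch): the unconditional prediction is extrapolated toward
# the conditional one by the guidance scale.
_uncond, _cond = torch.zeros(1, 4), torch.ones(1, 4)
_guided = _uncond + 3.0 * (_cond - _uncond)
assert torch.equal(_guided, torch.full((1, 4), 3.0))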
| 718
|
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    """Container for the SHA-1 pipeline: padding, block expansion and compression."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the data to a multiple of 64 bytes, appending the bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded data into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into eighty 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    """Cross-check the implementation against hashlib's reference SHA-1."""
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    """Hash a string or the contents of a file supplied on the command line."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaHash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
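
# Sketch of the left-rotate primitive the compression rounds depend on: the
# high bit wraps around to the low bit, and a 32-bit rotation is the identity.
assert SHAaHash.rotate(0x80000000, 1) == 0x00000001
assert SHAaHash.rotate(0xDEADBEEF, 32) == 0xDEADBEEF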
| 48
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
a_ = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 tensor of the given shape with ids below vocab_size."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    return np.array(values, dtype=jnp.int32).reshape(shape)
def random_attention_mask(shape, rng=None):
    """Random 0/1 attention mask whose last column is forced to 1."""
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
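
# Quick sanity sketch for the helpers above (assumes `jax` is importable, as
# the helpers themselves do): `ids_tensor` draws token ids strictly below
# `vocab_size`, and `random_attention_mask` guarantees at least one attended
# position per row.
if is_flax_available():
    _demo_ids = ids_tensor((2, 7), vocab_size=11)
    assert _demo_ids.shape == (2, 7) and int(_demo_ids.max()) < 11
    _demo_mask = random_attention_mask((2, 7))
    assert bool((_demo_mask.sum(axis=-1) > 0).all())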
@require_flax
class __lowercase :
"""simple docstring"""
_A : Optional[int] = None
_A : Tuple = ()
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
snake_case_ : List[Any] = 2
snake_case_ : Tuple = inputs["""input_ids"""].shape[-1] // 2
snake_case_ : Tuple = inputs["""input_ids"""][:max_batch_size, :sequence_length]
snake_case_ : int = jnp.ones_like(lowercase__ )
snake_case_ : List[str] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
snake_case_ : Dict = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
snake_case_ : Union[str, Any] = config.eos_token_id
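# generate() applies the same eos -> pad substitution internally when pad_token_id is unset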
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self._get_input_ids_and_config()
snake_case_ : Any = False
snake_case_ : int = max_length
snake_case_ : Dict = 0
for model_class in self.all_generative_model_classes:
snake_case_ : Tuple = model_class(lowercase__ )
snake_case_ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ : Dict = getattr(lowercase__ , lowercase__ )
snake_case_ : Optional[Any] = pt_model_class(lowercase__ ).eval()
snake_case_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase__ , flax_model.params )
snake_case_ : Tuple = flax_model.generate(lowercase__ ).sequences
snake_case_ : Dict = pt_model.generate(torch.tensor(lowercase__ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
snake_case_ : Optional[int] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ : str = self._get_input_ids_and_config()
snake_case_ : List[str] = False
snake_case_ : List[Any] = max_length
for model_class in self.all_generative_model_classes:
snake_case_ : Tuple = model_class(lowercase__ )
snake_case_ : Tuple = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : List[str] = jit(model.generate )
snake_case_ : Union[str, Any] = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ : str = self._get_input_ids_and_config()
snake_case_ : List[Any] = True
snake_case_ : Tuple = max_length
for model_class in self.all_generative_model_classes:
snake_case_ : Optional[Any] = model_class(lowercase__ )
snake_case_ : List[Any] = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : List[str] = jit(model.generate )
snake_case_ : Optional[int] = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ : List[str] = self._get_input_ids_and_config()
snake_case_ : Tuple = False
snake_case_ : Union[str, Any] = max_length
snake_case_ : Optional[Any] = 2
for model_class in self.all_generative_model_classes:
snake_case_ : Dict = model_class(lowercase__ )
snake_case_ : Optional[int] = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : List[Any] = jit(model.generate )
snake_case_ : Optional[Any] = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self._get_input_ids_and_config()
snake_case_ : Optional[Any] = False
snake_case_ : Tuple = max_length
snake_case_ : Union[str, Any] = 2
snake_case_ : int = 2
for model_class in self.all_generative_model_classes:
snake_case_ : int = model_class(lowercase__ )
snake_case_ : Optional[int] = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self._get_input_ids_and_config()
snake_case_ : List[str] = True
snake_case_ : List[Any] = max_length
snake_case_ : str = 0.8
snake_case_ : Optional[Any] = 10
snake_case_ : str = 0.3
snake_case_ : str = 1
snake_case_ : Optional[Any] = 8
snake_case_ : int = 9
for model_class in self.all_generative_model_classes:
snake_case_ : int = model_class(lowercase__ )
snake_case_ : Optional[int] = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : Optional[int] = jit(model.generate )
snake_case_ : str = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ : Any = self._get_input_ids_and_config()
snake_case_ : str = max_length
snake_case_ : Optional[Any] = 1
snake_case_ : Optional[int] = 8
snake_case_ : Union[str, Any] = 9
for model_class in self.all_generative_model_classes:
snake_case_ : str = model_class(lowercase__ )
snake_case_ : Any = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : Any = jit(model.generate )
snake_case_ : List[str] = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self._get_input_ids_and_config()
snake_case_ : List[str] = max_length
snake_case_ : Optional[int] = 2
snake_case_ : List[Any] = 1
snake_case_ : Optional[Any] = 8
snake_case_ : str = 9
for model_class in self.all_generative_model_classes:
snake_case_ : Tuple = model_class(lowercase__ )
snake_case_ : Optional[int] = model.generate(lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : List[Any] = jit(model.generate )
snake_case_ : List[str] = jit_generate(lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ : Tuple = attention_mask.at[(0, 0)].set(0 )
snake_case_ : List[str] = False
snake_case_ : Tuple = max_length
for model_class in self.all_generative_model_classes:
snake_case_ : Dict = model_class(lowercase__ )
snake_case_ : Optional[Any] = model.generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : str = jit(model.generate )
snake_case_ : int = jit_generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ : Any = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ : Tuple = attention_mask.at[(0, 0)].set(0 )
snake_case_ : Dict = True
snake_case_ : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
snake_case_ : str = model_class(lowercase__ )
snake_case_ : Dict = model.generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : Tuple = jit(model.generate )
snake_case_ : List[str] = jit_generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCamelCase (self ):
snake_case_ : int = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case_ : Any = attention_mask.at[(0, 0)].set(0 )
snake_case_ : Tuple = 2
snake_case_ : Any = max_length
for model_class in self.all_generative_model_classes:
snake_case_ : Any = model_class(lowercase__ )
snake_case_ : Union[str, Any] = model.generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowercase__ )
snake_case_ : List[str] = jit(model.generate )
snake_case_ : Optional[Any] = jit_generate(lowercase__ , attention_mask=lowercase__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
snake_case_ : int = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
snake_case_ : List[Any] = """Hello world"""
snake_case_ : Optional[int] = tokenizer(lowercase__ , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowercase__ , """do_samples""" ):
model.generate(lowercase__ , do_samples=lowercase__ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowercase__ , """foo""" ):
snake_case_ : Optional[int] = {"""foo""": """bar"""}
model.generate(lowercase__ , **lowercase__ )
| 719
|
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
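# play the grow-from-center highlights on the checkpoint first, then move the copied shard markers onto the CPU slots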
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
| 48
| 0
|
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int | float | str , SCREAMING_SNAKE_CASE__ : int | float | str ):
"""simple docstring"""
if nth_term == "":
return [""]
snake_case_ : List[Any] = int(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = int(SCREAMING_SNAKE_CASE__ )
snake_case_ : list[str] = []
for temp in range(int(SCREAMING_SNAKE_CASE__ ) ):
series.append(f'1 / {pow(temp + 1 , int(SCREAMING_SNAKE_CASE__ ) )}' if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = int(input('''Enter the last number (nth term) of the P-Series'''))
a_ = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
| 720
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
if start < end:
snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = a[end]
snake_case_ : Dict = a[pivot]
snake_case_ : Any = temp
snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ )
return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
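# Lomuto-style scan: new_pivot_index marks the right edge of the "smaller than pivot" region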
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 100 # 1000 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution '''
'''is:'''
)
print(z)
| 48
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
a_ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[Any] = VOCAB_FILES_NAMES
_A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : List[Any] = ["""input_ids""", """attention_mask"""]
_A : List[str] = BartTokenizer
def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="replace" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=False , lowercase__=True , **lowercase__ , ):
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ , **lowercase__ , )
snake_case_ : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase__ ) != add_prefix_space:
snake_case_ : List[str] = getattr(lowercase__ , pre_tok_state.pop("""type""" ) )
snake_case_ : int = add_prefix_space
snake_case_ : List[Any] = pre_tok_class(**lowercase__ )
snake_case_ : List[str] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ : int = """post_processor"""
snake_case_ : Union[str, Any] = getattr(self.backend_tokenizer , lowercase__ , lowercase__ )
if tokenizer_component_instance:
snake_case_ : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ : int = tuple(state["""sep"""] )
if "cls" in state:
snake_case_ : List[Any] = tuple(state["""cls"""] )
snake_case_ : str = False
if state.get("""add_prefix_space""" , lowercase__ ) != add_prefix_space:
snake_case_ : Optional[int] = add_prefix_space
snake_case_ : int = True
if state.get("""trim_offsets""" , lowercase__ ) != trim_offsets:
snake_case_ : int = trim_offsets
snake_case_ : int = True
if changes_to_apply:
snake_case_ : int = getattr(lowercase__ , state.pop("""type""" ) )
snake_case_ : List[str] = component_class(**lowercase__ )
setattr(self.backend_tokenizer , lowercase__ , lowercase__ )
@property
def __UpperCamelCase (self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else value
snake_case_ : Tuple = value
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
snake_case_ : Union[str, Any] = kwargs.get("""is_split_into_words""" , lowercase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
snake_case_ : Dict = kwargs.get("""is_split_into_words""" , lowercase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : Optional[Any] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__=None ):
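# BART format -- single sequence: <s> A </s>; pair of sequences: <s> A </s></s> B </s>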
snake_case_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : Optional[Any] = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 721
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ):
"""simple docstring"""
snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE__ )
# if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes i and j, add an edge from i to j
# if the randomly generated number is less than probability
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE__ )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE__ )
return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return {
i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if "resnet-50" in model_name:
snake_case_ : Union[str, Any] = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
snake_case_ : Optional[int] = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
snake_case_ : List[str] = DetrConfig(use_timm_backbone=SCREAMING_SNAKE_CASE__ , backbone_config=SCREAMING_SNAKE_CASE__ )
# set label attributes
snake_case_ : Union[str, Any] = """panoptic""" in model_name
if is_panoptic:
snake_case_ : Tuple = 2_5_0
else:
snake_case_ : Optional[Any] = 9_1
snake_case_ : Union[str, Any] = """huggingface/label-files"""
snake_case_ : List[Any] = """coco-detection-id2label.json"""
snake_case_ : Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ : Tuple = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ : List[Any] = idalabel
snake_case_ : int = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Any = val
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=False ):
"""simple docstring"""
snake_case_ : Tuple = """"""
if is_panoptic:
snake_case_ : str = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case_ : List[Any] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
snake_case_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : List[Any] = in_proj_weight[:2_5_6, :]
snake_case_ : Dict = in_proj_bias[:2_5_6]
snake_case_ : List[Any] = in_proj_weight[2_5_6:5_1_2, :]
snake_case_ : Optional[int] = in_proj_bias[2_5_6:5_1_2]
snake_case_ : Dict = in_proj_weight[-2_5_6:, :]
snake_case_ : Dict = in_proj_bias[-2_5_6:]
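# DETR's hidden size is 256, so the fused 768-row in_proj matrix splits into 256-row query/key/value blocks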
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
snake_case_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
snake_case_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Any = in_proj_weight[:2_5_6, :]
snake_case_ : Tuple = in_proj_bias[:2_5_6]
snake_case_ : str = in_proj_weight[2_5_6:5_1_2, :]
snake_case_ : Optional[int] = in_proj_bias[2_5_6:5_1_2]
snake_case_ : Optional[int] = in_proj_weight[-2_5_6:, :]
snake_case_ : str = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
snake_case_ : List[str] = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
snake_case_ : Any = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
snake_case_ : Dict = in_proj_weight_cross_attn[:2_5_6, :]
snake_case_ : int = in_proj_bias_cross_attn[:2_5_6]
snake_case_ : Dict = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
snake_case_ : Any = in_proj_bias_cross_attn[2_5_6:5_1_2]
snake_case_ : Tuple = in_proj_weight_cross_attn[-2_5_6:, :]
snake_case_ : int = in_proj_bias_cross_attn[-2_5_6:]
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : int = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=False ):
"""simple docstring"""
snake_case_ : Any = get_detr_config(SCREAMING_SNAKE_CASE__ )
# load original model from torch hub
snake_case_ : str = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(f'Converting model {model_name}...' )
snake_case_ : List[str] = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=SCREAMING_SNAKE_CASE__ ).eval()
snake_case_ : Dict = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(SCREAMING_SNAKE_CASE__ ):
if is_panoptic:
snake_case_ : Union[str, Any] = """detr.""" + src
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , is_panoptic=SCREAMING_SNAKE_CASE__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ : Union[str, Any] = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
snake_case_ : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case_ : int = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Union[str, Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
snake_case_ : Any = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
snake_case_ : Dict = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = val
# finally, create HuggingFace model and load state dict
snake_case_ : Optional[Any] = DetrForSegmentation(SCREAMING_SNAKE_CASE__ ) if is_panoptic else DetrForObjectDetection(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# verify our conversion on an image
snake_case_ : int = """coco_panoptic""" if is_panoptic else """coco_detection"""
snake_case_ : List[Any] = DetrImageProcessor(format=SCREAMING_SNAKE_CASE__ )
snake_case_ : int = processor(images=prepare_img() , return_tensors="""pt""" )
snake_case_ : Optional[Any] = encoding["""pixel_values"""]
snake_case_ : Optional[Any] = detr(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(f'nielsr/{model_name}' )
processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
a_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 700
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """dpr"""
def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : int = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : List[str] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : Union[str, Any] = projection_dim
snake_case_ : str = position_embedding_type
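# a projection_dim of 0 (the default) keeps the raw encoder output, i.e. no extra projection layer is added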
| 48
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Union[str, Any] = [], []
while len(SCREAMING_SNAKE_CASE__ ) > 1:
snake_case_ : int = min(SCREAMING_SNAKE_CASE__ ), max(SCREAMING_SNAKE_CASE__ )
start.append(SCREAMING_SNAKE_CASE__ )
end.append(SCREAMING_SNAKE_CASE__ )
collection.remove(SCREAMING_SNAKE_CASE__ )
collection.remove(SCREAMING_SNAKE_CASE__ )
end.reverse()
return start + collection + end
if __name__ == "__main__":
a_ = input('''Enter numbers separated by a comma:\n''').strip()
a_ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 701
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a_ = 10
a_ = 256
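# sequences shorter than MIN_NUM_TOKENS are skipped; NUM_PERM permutations trade MinHash accuracy against speed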
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS:
return None
snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ )
for token in set(SCREAMING_SNAKE_CASE__ ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0}
class __lowercase :
"""simple docstring"""
def __init__(self , *,
lowercase__ = 0.85 , ):
snake_case_ : Tuple = duplication_jaccard_threshold
snake_case_ : Optional[Any] = NUM_PERM
snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
snake_case_ : List[Any] = defaultdict(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : int = self._index.query(lowercase__ )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(lowercase__ , lowercase__ )
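# if near-duplicates exist: extend a cluster anchored by one of them, else start a new cluster at the first one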
if len(lowercase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = []
for base, duplicates in self._duplicate_clusters.items():
snake_case_ : Optional[Any] = [base] + list(lowercase__ )
# reformat the cluster to be a list of dict
snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowercase__ )
return duplicate_clusters
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.get_duplicate_clusters()
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ , snake_case_ : str = element
snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ):
"""simple docstring"""
snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ):
di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
a_ = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
for elementa in cluster:
snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
snake_case_ : Union[str, Any] = 1
extremes.append(SCREAMING_SNAKE_CASE__ )
return extremes
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
global _shared_dataset
snake_case_ : str = dataset
snake_case_ : int = []
snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ):
extremes_list.append(SCREAMING_SNAKE_CASE__ )
return extremes_list
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ):
"""simple docstring"""
snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
snake_case_ : str = {}
snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extremes in extremes_clusters:
for element in extremes:
snake_case_ : int = element
snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
snake_case_ : List[Any] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
return ds_filter, duplicate_clusters
| 48
| 0
|
"""simple docstring"""
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 702
|
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
a_ = logging.getLogger(__name__)
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
a_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
a_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
a_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
a_ = [0] * args.vocab_size
for k, v in counter.items():
a_ = v
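# token ids never seen in the data keep their initial count of 0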
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 48
| 0
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path / """cache"""
snake_case_ : Optional[int] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : int = {"""text""": """string"""}
snake_case_ : Any = features.copy() if features else default_expected_features
snake_case_ : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Optional[Any] = {"""text""": """string"""}
snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[str] = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : str = [text_path]
snake_case_ : List[str] = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Tuple = tmp_path / """cache"""
# the text reader always yields a single "text" column (string by default); `features` may cast it to another dtype
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : int = features.copy() if features else default_expected_features
snake_case_ : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if split:
snake_case_ : Union[str, Any] = {split: text_path}
else:
snake_case_ : Union[str, Any] = """train"""
snake_case_ : int = {"""train""": text_path, """test""": text_path}
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : Tuple = {"""text""": """string"""}
snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
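# Hedged usage sketch of the API these tests exercise (the path "data.txt"
# is hypothetical):
#
#   from datasets.io.text import TextDatasetReader
#   ds = TextDatasetReader("data.txt", cache_dir="./cache").read()
#   # -> a Dataset with one "text" column (dtype "string"), one row per line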
| 703
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path / """cache"""
snake_case_ : Optional[int] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : int = {"""text""": """string"""}
snake_case_ : Any = features.copy() if features else default_expected_features
snake_case_ : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Optional[Any] = {"""text""": """string"""}
snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[str] = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : str = [text_path]
snake_case_ : List[str] = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Tuple = tmp_path / """cache"""
    # the text reader exposes a single "text" column whose default dtype is "string"
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : int = features.copy() if features else default_expected_features
snake_case_ : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if split:
snake_case_ : Union[str, Any] = {split: text_path}
else:
snake_case_ : Union[str, Any] = """train"""
snake_case_ : int = {"""train""": text_path, """test""": text_path}
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : Tuple = {"""text""": """string"""}
snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 48
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
snake_case_ : Optional[Any] = 0
snake_case_ : int = len(SCREAMING_SNAKE_CASE__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
snake_case_ : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
snake_case_ : Dict = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
snake_case_ : List[Any] = left
snake_case_ : List[Any] = point
elif point > right:
snake_case_ : List[Any] = right
snake_case_ : Tuple = point
else:
if item < current_item:
snake_case_ : List[Any] = point - 1
else:
snake_case_ : Union[str, Any] = point + 1
return None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
snake_case_ : int = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif point > right:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point - 1 )
else:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point + 1 , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
if collection != sorted(SCREAMING_SNAKE_CASE__ ):
raise ValueError("""Collection must be ascending sorted""" )
return True
if __name__ == "__main__":
import sys
a_ = 0
if debug == 1:
a_ = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
a_ = 67
a_ = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('''Not found''')
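# Hedged sketch (not part of the original corpus): the search above probes at
# an interpolated index rather than the midpoint. `interpolation_probe` below
# restates just that formula with descriptive names, since the corpus
# obfuscates identifiers; the boundary values are assumed distinct.
def interpolation_probe(sorted_collection, item, left, right):
    # linearly interpolate where `item` should fall between the endpoint values
    return left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

_demo = [10, 30, 40, 45, 50, 66, 77, 93]
assert interpolation_probe(_demo, 10, 0, 7) == 0  # smallest value probes the left end
assert interpolation_probe(_demo, 93, 0, 7) == 7  # largest value probes the right end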
| 704
|
"""simple docstring"""
from copy import deepcopy
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ = None , lowercase__ = None ):
if arr is None and size is not None:
snake_case_ : str = size
snake_case_ : Optional[Any] = [0] * size
elif arr is not None:
self.init(lowercase__ )
else:
raise ValueError("""Either arr or size must be specified""" )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[Any] = len(lowercase__ )
snake_case_ : int = deepcopy(lowercase__ )
for i in range(1 , self.size ):
snake_case_ : Optional[Any] = self.next_(lowercase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCamelCase (self ):
snake_case_ : Dict = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case_ : Optional[int] = self.next_(lowercase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index + (index & (-index))
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index - (index & (-index))
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case_ : Tuple = self.next_(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
self.add(lowercase__ , value - self.get(lowercase__ ) )
def __UpperCamelCase (self , lowercase__ ):
if right == 0:
return 0
snake_case_ : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case_ : Optional[int] = self.prev(lowercase__ )
return result
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
return self.prefix(lowercase__ ) - self.prefix(lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return self.query(lowercase__ , index + 1 )
def __UpperCamelCase (self , lowercase__ ):
value -= self.tree[0]
if value < 0:
return -1
snake_case_ : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
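# Hedged, self-contained sketch (added for illustration) of the Fenwick-tree
# technique the class above implements: `index & (-index)` isolates the lowest
# set bit, which drives both the update walk and the prefix-sum walk.
def _fenwick_demo():
    size = 8
    tree = [0] * (size + 1)  # 1-based internal array

    def add(i, value):  # point update in O(log n)
        while i <= size:
            tree[i] += value
            i += i & (-i)  # climb to the next responsible node

    def prefix(i):  # sum of positions 1..i in O(log n)
        total = 0
        while i > 0:
            total += tree[i]
            i -= i & (-i)  # drop the lowest set bit
        return total

    for pos, v in enumerate([3, 1, 4, 1, 5, 9, 2, 6], start=1):
        add(pos, v)
    assert prefix(4) == 9  # 3 + 1 + 4 + 1
    assert prefix(8) - prefix(4) == 22  # range sum over positions 5..8

_fenwick_demo()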
| 48
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Union[str, Any] = []
snake_case_ : str = set({"""(""", """[""", """{"""} )
snake_case_ : List[str] = set({""")""", """]""", """}"""} )
snake_case_ : Union[str, Any] = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(SCREAMING_SNAKE_CASE__ ) == 0 or (len(SCREAMING_SNAKE_CASE__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(SCREAMING_SNAKE_CASE__ ) == 0
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Optional[Any] = input("""Enter sequence of brackets: """ )
if is_balanced(SCREAMING_SNAKE_CASE__ ):
print(SCREAMING_SNAKE_CASE__ , """is balanced""" )
else:
print(SCREAMING_SNAKE_CASE__ , """is not balanced""" )
if __name__ == "__main__":
main()
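# Hedged, self-contained restatement (illustrative only) of the stack-based
# matcher above: push every opener, and on a closer require the matching
# opener on top of the stack.
def _balanced_sketch(s):
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack

assert _balanced_sketch("([]{})")
assert not _balanced_sketch("([)]")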
| 705
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ):
"""simple docstring"""
snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
for i in range(1 , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Tuple = collection[i]
snake_case_ : Tuple = 0
snake_case_ : str = i - 1
while low <= high:
snake_case_ : Optional[int] = (low + high) // 2
if val < collection[mid]:
snake_case_ : List[str] = mid - 1
else:
snake_case_ : str = mid + 1
for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ):
snake_case_ : List[str] = collection[j - 1]
snake_case_ : Any = val
return collection
if __name__ == "__main__":
a_ = input('''Enter numbers separated by a comma:\n''').strip()
a_ = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
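# Hedged aside: the binary search above locates the insertion point (`low`)
# before shifting elements; the standard library's bisect module provides the
# same primitive. A minimal equivalent sketch, for comparison only:
import bisect

def _binary_insertion_sort_sketch(items):
    result = []
    for x in items:
        bisect.insort(result, x)  # binary search + insert, mirroring the loop above
    return result

assert _binary_insertion_sort_sketch([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]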
| 48
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
a_ = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
for attribute in key.split(""".""" ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
snake_case_ : List[str] = """lm_head"""
snake_case_ : Tuple = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
snake_case_ : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
snake_case_ : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
snake_case_ : List[str] = value
elif weight_type == "weight_g":
snake_case_ : Optional[Any] = value
elif weight_type == "weight_v":
snake_case_ : List[str] = value
elif weight_type == "bias":
snake_case_ : Optional[int] = value
else:
snake_case_ : str = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : Tuple = []
snake_case_ : Optional[Any] = fairseq_model.state_dict()
snake_case_ : str = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
snake_case_ : Dict = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ : Dict = True
if "*" in mapped_key:
snake_case_ : Optional[Any] = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2]
snake_case_ : Union[str, Any] = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
snake_case_ : Optional[int] = """weight_g"""
elif "weight_v" in name:
snake_case_ : Tuple = """weight_v"""
elif "bias" in name:
snake_case_ : Any = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ : str = """weight"""
else:
snake_case_ : List[str] = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(f'Unused weights: {unused_weights}' )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Any = full_name.split("""conv_layers.""" )[-1]
snake_case_ : int = name.split(""".""" )
snake_case_ : List[Any] = int(items[0] )
snake_case_ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
snake_case_ : Any = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
snake_case_ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
snake_case_ : str = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
snake_case_ : int = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=True ):
"""simple docstring"""
if config_path is not None:
snake_case_ : Union[str, Any] = UniSpeechConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
snake_case_ : Optional[int] = UniSpeechConfig()
if is_finetuned:
if dict_path:
snake_case_ : Optional[int] = Dictionary.load_from_json(SCREAMING_SNAKE_CASE__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case_ : Union[str, Any] = target_dict.pad_index
snake_case_ : List[Any] = target_dict.bos_index
snake_case_ : str = target_dict.eos_index
snake_case_ : str = len(target_dict.symbols )
snake_case_ : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ : int = 4_2
snake_case_ : Optional[int] = 4_3
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : int = WavaVecaPhonemeCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE__ , )
snake_case_ : str = True if config.feat_extract_norm == """layer""" else False
snake_case_ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
snake_case_ : Union[str, Any] = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = UniSpeechForCTC(SCREAMING_SNAKE_CASE__ )
else:
snake_case_ : Dict = UniSpeechForPreTraining(SCREAMING_SNAKE_CASE__ )
if is_finetuned:
snake_case_ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} )
else:
snake_case_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case_ : Any = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_unispeech.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a_ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
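# Illustrative invocation (paths are hypothetical; flag names come from the
# argparse definitions above):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --dict_path /path/to/dict.ltr.txt \
#       --config_path ./config.json
#
# Add --not_finetuned for a pretraining-only checkpoint: the script then
# builds UniSpeechForPreTraining and skips tokenizer/processor creation.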
| 706
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = ["""image_processor""", """tokenizer"""]
_A : str = """ChineseCLIPImageProcessor"""
_A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ):
snake_case_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase__ , )
snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" )
snake_case_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = self.image_processor
def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if images is not None:
snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if text is not None and images is not None:
snake_case_ : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.tokenizer.model_input_names
snake_case_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCamelCase (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , )
return self.image_processor_class
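# Hedged usage sketch of this processor as exposed upstream (the checkpoint id
# is an assumption; any hosted ChineseCLIP checkpoint should work):
#
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
#   # -> tokenizer fields (input_ids, ...) plus pixel_values from the image processor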
| 48
| 0
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
a_ = sys.version_info >= (3, 10)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Any=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
@dataclass
class __lowercase :
"""simple docstring"""
_A : int
_A : float
_A : str
_A : bool
@dataclass
class __lowercase :
"""simple docstring"""
_A : int = 42
_A : str = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class __lowercase :
"""simple docstring"""
_A : bool = False
_A : bool = True
_A : Optional[bool] = None
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = """titi"""
_A : str = """toto"""
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = """titi"""
_A : Dict = """toto"""
_A : Tuple = 42
@dataclass
class __lowercase :
"""simple docstring"""
_A : BasicEnum = "toto"
def __UpperCamelCase (self ):
snake_case_ : int = BasicEnum(self.foo )
@dataclass
class __lowercase :
"""simple docstring"""
_A : MixedTypeEnum = "toto"
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class __lowercase :
"""simple docstring"""
_A : Optional[int] = None
_A : Optional[float] = field(default=_UpperCAmelCase , metadata={"""help""": """help message"""})
_A : Optional[str] = None
_A : Optional[List[str]] = list_field(default=[])
_A : Optional[List[int]] = list_field(default=[])
@dataclass
class __lowercase :
"""simple docstring"""
_A : List[int] = list_field(default=[])
_A : List[int] = list_field(default=[1, 2, 3])
_A : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
_A : List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class __lowercase :
"""simple docstring"""
_A : List[int] = field()
_A : str = field()
_A : BasicEnum = field()
def __UpperCamelCase (self ):
snake_case_ : List[Any] = BasicEnum(self.required_enum )
@dataclass
class __lowercase :
"""simple docstring"""
_A : int
_A : "BasicEnum" = field()
_A : "Optional[bool]" = None
_A : "str" = field(default="""toto""" , metadata={"""help""": """help message"""})
_A : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
if is_python_no_less_than_3_10:
@dataclass
class __lowercase :
"""simple docstring"""
_A : bool = False
_A : bool = True
_A : bool | None = None
@dataclass
class __lowercase :
"""simple docstring"""
_A : int | None = None
_A : float | None = field(default=_UpperCAmelCase , metadata={"""help""": """help message"""})
_A : str | None = None
_A : list[str] | None = list_field(default=[])
_A : list[int] | None = list_field(default=[])
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ : List[str] = {k: v for k, v in vars(lowercase__ ).items() if k != """container"""}
snake_case_ : Optional[Any] = {k: v for k, v in vars(lowercase__ ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , lowercase__ ) and yy.get("""choices""" , lowercase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](lowercase__ ) , yy["""type"""](lowercase__ ) )
del xx["type"], yy["type"]
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Any = HfArgumentParser(lowercase__ )
snake_case_ : str = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase__ , required=lowercase__ )
expected.add_argument("""--bar""" , type=lowercase__ , required=lowercase__ )
expected.add_argument("""--baz""" , type=lowercase__ , required=lowercase__ )
expected.add_argument("""--flag""" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="""?""" )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
(snake_case_ ) : Union[str, Any] = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ )
self.assertFalse(example.flag )
def __UpperCamelCase (self ):
snake_case_ : List[str] = HfArgumentParser(lowercase__ )
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=lowercase__ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowercase__ , help="""help message""" )
self.argparsersEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="""?""" )
expected.add_argument("""--baz""" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=lowercase__ , dest="""baz""" )
expected.add_argument("""--opt""" , type=lowercase__ , default=lowercase__ )
snake_case_ : int = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
snake_case_ : Any = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : int = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
snake_case_ : Optional[int] = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
snake_case_ : Optional[Any] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
snake_case_ : Optional[Any] = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
snake_case_ : Union[str, Any] = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = HfArgumentParser(lowercase__ )
snake_case_ : Any = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ : Dict = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ : int = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ : Any = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
snake_case_ : List[Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __UpperCamelCase (self ):
@dataclass
class __lowercase :
"""simple docstring"""
_A : Literal["titi", "toto", 42] = "toto"
snake_case_ : List[Any] = HfArgumentParser(lowercase__ )
snake_case_ : Tuple = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : Tuple = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ : Optional[int] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ : List[str] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def __UpperCamelCase (self ):
snake_case_ : str = HfArgumentParser(lowercase__ )
snake_case_ : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=lowercase__ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase__ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ : Optional[int] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def __UpperCamelCase (self ):
snake_case_ : Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=lowercase__ , type=lowercase__ )
expected.add_argument("""--bar""" , default=lowercase__ , type=lowercase__ , help="""help message""" )
expected.add_argument("""--baz""" , default=lowercase__ , type=lowercase__ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=lowercase__ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=lowercase__ )
snake_case_ : int = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
snake_case_ : Dict = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : int = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
snake_case_ : Optional[int] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def __UpperCamelCase (self ):
snake_case_ : List[Any] = HfArgumentParser(lowercase__ )
snake_case_ : Tuple = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=lowercase__ , required=lowercase__ )
expected.add_argument("""--required_str""" , type=lowercase__ , required=lowercase__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = HfArgumentParser(lowercase__ )
snake_case_ : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase__ , required=lowercase__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase__ , )
expected.add_argument("""--opt""" , type=lowercase__ , default=lowercase__ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowercase__ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[str] = HfArgumentParser(lowercase__ )
snake_case_ : Tuple = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
snake_case_ : List[Any] = parser.parse_dict(lowercase__ )[0]
snake_case_ : Optional[int] = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[str] = HfArgumentParser(lowercase__ )
snake_case_ : Tuple = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = HfArgumentParser(lowercase__ )
snake_case_ : Optional[Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : int = os.path.join(lowercase__ , """temp_json""" )
os.mkdir(lowercase__ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(lowercase__ , lowercase__ )
snake_case_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
snake_case_ : List[Any] = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = HfArgumentParser(lowercase__ )
snake_case_ : int = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Tuple = os.path.join(lowercase__ , """temp_yaml""" )
os.mkdir(lowercase__ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(lowercase__ , lowercase__ )
snake_case_ : Any = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
snake_case_ : Optional[int] = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : int = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
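# Hedged sketch (illustrative, not part of the original tests) of the pattern
# exercised above: declare a dataclass, hand it to HfArgumentParser, and parse
# CLI argument lists or plain dicts into typed instances.
from dataclasses import dataclass as _sketch_dataclass

@_sketch_dataclass
class _DemoArgs:
    foo: int = 1
    baz: str = "toto"

_sketch_parser = HfArgumentParser(_DemoArgs)
(_demo,) = _sketch_parser.parse_args_into_dataclasses(["--foo", "7"], look_for_args_file=False)
assert _demo.foo == 7 and _demo.baz == "toto"
(_demo_from_dict,) = _sketch_parser.parse_dict({"foo": 3, "baz": "x"})
assert _demo_from_dict == _DemoArgs(foo=3, baz="x")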
| 707
|
"""simple docstring"""
import argparse
import copy
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : List[Any] = {}
with open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case_ : int = []
_list.append([line.split()[1], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case_ : str = []
_list.append([line.split()[0], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ ) as f:
snake_case_ : Optional[Any] = f.read(1 )
snake_case_ : Union[str, Any] = start_node
snake_case_ : Dict = []
snake_case_ : Union[str, Any] = start_node
snake_case_ : Tuple = 0
while visiting not in first_solution:
snake_case_ : int = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution:
snake_case_ : Union[str, Any] = k[1]
snake_case_ : Any = k[0]
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = best_node
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case_ : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = []
for n in solution[1:-1]:
snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ )
for kn in solution[1:-1]:
snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ )
if n == kn:
continue
snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
snake_case_ : int = kn
snake_case_ : Dict = n
snake_case_ : Optional[int] = 0
for k in _tmp[:-1]:
snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case_ : Dict = distance + int(i[1] )
_tmp.append(SCREAMING_SNAKE_CASE__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case_ : Optional[Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
snake_case_ : Dict = 1
snake_case_ : List[Any] = first_solution
snake_case_ : List[Any] = []
snake_case_ : Optional[Any] = distance_of_first_solution
snake_case_ : Dict = solution
while count <= iters:
snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = 0
snake_case_ : List[Any] = neighborhood[index_of_best_solution]
snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1
snake_case_ : List[str] = False
while not found:
snake_case_ : Tuple = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
if best_solution[i] != solution[i]:
snake_case_ : Optional[Any] = best_solution[i]
snake_case_ : int = solution[i]
break
snake_case_ : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case_ : Tuple = True
snake_case_ : Dict = best_solution[:-1]
snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case_ : Tuple = cost
snake_case_ : Union[str, Any] = solution
else:
snake_case_ : str = index_of_best_solution + 1
snake_case_ : Tuple = neighborhood[index_of_best_solution]
if len(SCREAMING_SNAKE_CASE__ ) >= size:
tabu_list.pop(0 )
snake_case_ : List[str] = count + 1
return best_solution_ever, best_cost
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
"""simple docstring"""
snake_case_ : Tuple = generate_neighbours(args.File )
snake_case_ , snake_case_ : Optional[Any] = generate_first_solution(
args.File , SCREAMING_SNAKE_CASE__ )
snake_case_ , snake_case_ : Dict = tabu_search(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , )
print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
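# Hedged note on the expected input file (format inferred from the parsing
# above; the filename is hypothetical): one whitespace-separated edge per
# line, "node node distance". generate_first_solution reads only the first
# character of the file as the start node, so single-letter labels fit best:
#
#   a b 20
#   a c 18
#   b c 10
#
# Example run:  python tabu_search.py -f edges.txt -i 100 -s 5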
| 48
| 0
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __get__(self , lowercase__ , lowercase__=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
snake_case_ : int = """__cached_""" + self.fget.__name__
snake_case_ : str = getattr(lowercase__ , lowercase__ , lowercase__ )
if cached is None:
snake_case_ : Any = self.fget(lowercase__ )
setattr(lowercase__ , lowercase__ , lowercase__ )
return cached
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : List[Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'invalid truth value {val!r}' )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
if is_torch_fx_proxy(SCREAMING_SNAKE_CASE__ ):
return True
if is_torch_available():
import torch
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(SCREAMING_SNAKE_CASE__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(SCREAMING_SNAKE_CASE__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
return isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
return _is_numpy(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
import torch
return isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
import torch
return isinstance(SCREAMING_SNAKE_CASE__ , torch.device )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
import torch
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
return False
return isinstance(SCREAMING_SNAKE_CASE__ , torch.dtype )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
import tensorflow as tf
return isinstance(SCREAMING_SNAKE_CASE__ , tf.Tensor )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(SCREAMING_SNAKE_CASE__ , """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(SCREAMING_SNAKE_CASE__ )
return type(SCREAMING_SNAKE_CASE__ ) == tf.Tensor
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(SCREAMING_SNAKE_CASE__ , jnp.ndarray )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
return False if not is_flax_available() else _is_jax(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE__ , (dict, UserDict) ):
return {k: to_py_obj(SCREAMING_SNAKE_CASE__ ) for k, v in obj.items()}
elif isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
return [to_py_obj(SCREAMING_SNAKE_CASE__ ) for o in obj]
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
return obj.numpy().tolist()
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return np.asarray(SCREAMING_SNAKE_CASE__ ).tolist()
elif isinstance(SCREAMING_SNAKE_CASE__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE__ , (dict, UserDict) ):
return {k: to_numpy(SCREAMING_SNAKE_CASE__ ) for k, v in obj.items()}
elif isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
return np.array(SCREAMING_SNAKE_CASE__ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
return obj.numpy()
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return np.asarray(SCREAMING_SNAKE_CASE__ )
else:
return obj
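# Hedged, numpy-only sketch (added for illustration) of the recursive
# container/tensor dispatch that to_py_obj performs above:
import numpy as _np

def _to_py_obj_sketch(obj):
    if isinstance(obj, dict):
        return {k: _to_py_obj_sketch(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [_to_py_obj_sketch(o) for o in obj]
    if isinstance(obj, (_np.ndarray, _np.number)):
        return obj.tolist()  # also works for 0-d arrays and numpy scalars
    return obj

assert _to_py_obj_sketch({"a": _np.arange(3), "b": (1, 2)}) == {"a": [0, 1, 2], "b": [1, 2]}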
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : str = fields(self )
# Safety and consistency checks
if not len(lowercase__ ):
raise ValueError(f'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'{self.__class__.__name__} should not have more than one required field.' )
snake_case_ : Union[str, Any] = getattr(self , class_fields[0].name )
snake_case_ : Any = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : Optional[Any] = first_field.items()
snake_case_ : int = True
else:
try:
snake_case_ : Any = iter(lowercase__ )
snake_case_ : str = True
except TypeError:
snake_case_ : List[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowercase__ ):
if (
not isinstance(lowercase__ , (list, tuple) )
or not len(lowercase__ ) == 2
or not isinstance(element[0] , lowercase__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
snake_case_ : Dict = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
snake_case_ : Union[str, Any] = element[1]
elif first_field is not None:
snake_case_ : Tuple = first_field
else:
for field in class_fields:
snake_case_ : Optional[int] = getattr(self , field.name )
if v is not None:
snake_case_ : Optional[int] = v
def __delitem__(self , *lowercase__ , **lowercase__ ):
raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__(self , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : Union[str, Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self , lowercase__ , lowercase__ ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowercase__ , lowercase__ )
super().__setattr__(lowercase__ , lowercase__ )
def __setitem__(self , lowercase__ , lowercase__ ):
# Will raise a KeyException if needed
super().__setitem__(lowercase__ , lowercase__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
return tuple(self[k] for k in self.keys() )
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase):
"""simple docstring"""
@classmethod
def __UpperCamelCase (cls , lowercase__ ):
raise ValueError(
f'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' )
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = """longest"""
_A : Optional[Any] = """max_length"""
_A : Optional[int] = """do_not_pad"""
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Any = """pt"""
_A : Tuple = """tf"""
_A : Any = """np"""
_A : Dict = """jax"""
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ ):
snake_case_ : Any = context_managers
snake_case_ : int = ExitStack()
def __enter__(self ):
for context_manager in self.context_managers:
self.stack.enter_context(lowercase__ )
def __exit__(self , *lowercase__ , **lowercase__ ):
self.stack.__exit__(*lowercase__ , **lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Dict = infer_framework(SCREAMING_SNAKE_CASE__ )
if framework == "tf":
snake_case_ : Dict = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
snake_case_ : Union[str, Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
snake_case_ : List[str] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : Optional[int] = model_class.__name__
snake_case_ : Tuple = infer_framework(SCREAMING_SNAKE_CASE__ )
if framework == "tf":
snake_case_ : int = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
snake_case_ : List[str] = inspect.signature(model_class.forward ) # PyTorch models
else:
snake_case_ : List[Any] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
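# Standalone sketch of the signature inspection above (`inspect` is assumed to
# already be imported at the top of this module; the function is hypothetical):
def _demo_forward(input_ids, attention_mask=None, labels=None, return_loss=True): ...

_sig = inspect.signature(_demo_forward)
assert [p for p in _sig.parameters if "label" in p] == ["labels"]
assert _sig.parameters["return_loss"].default is True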
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : MutableMapping , SCREAMING_SNAKE_CASE__ : str = "" , SCREAMING_SNAKE_CASE__ : str = "." ):
"""simple docstring"""
def _flatten_dict(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int]="" , SCREAMING_SNAKE_CASE__ : List[Any]="." ):
for k, v in d.items():
snake_case_ : Tuple = str(SCREAMING_SNAKE_CASE__ ) + delimiter + str(SCREAMING_SNAKE_CASE__ ) if parent_key else k
if v and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
yield from flatten_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delimiter=SCREAMING_SNAKE_CASE__ ).items()
else:
yield key, v
return dict(_flatten_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
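# Standalone sanity sketch of the flattening above (hypothetical helper name):
# nested keys are joined with the delimiter into a single-level dict.
def _flatten_demo(d, parent_key="", delimiter="."):
    out = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else k
        if isinstance(v, dict) and v:
            out.update(_flatten_demo(v, key, delimiter))
        else:
            out[key] = v
    return out

assert _flatten_demo({"a": {"b": 1, "c": {}}, "d": 2}) == {"a.b": 1, "a.c": {}, "d": 2}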
@contextmanager
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : bool = False ):
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
"""simple docstring"""
if is_numpy_array(SCREAMING_SNAKE_CASE__ ):
return np.transpose(SCREAMING_SNAKE_CASE__ , axes=SCREAMING_SNAKE_CASE__ )
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return array.T if axes is None else array.permute(*SCREAMING_SNAKE_CASE__ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
import tensorflow as tf
return tf.transpose(SCREAMING_SNAKE_CASE__ , perm=SCREAMING_SNAKE_CASE__ )
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return jnp.transpose(SCREAMING_SNAKE_CASE__ , axes=SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f'Type not supported for transpose: {type(SCREAMING_SNAKE_CASE__ )}.' )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
if is_numpy_array(SCREAMING_SNAKE_CASE__ ):
return np.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return array.reshape(*SCREAMING_SNAKE_CASE__ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
import tensorflow as tf
return tf.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return jnp.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f'Type not supported for reshape: {type(SCREAMING_SNAKE_CASE__ )}.' )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str]=None ):
"""simple docstring"""
if is_numpy_array(SCREAMING_SNAKE_CASE__ ):
return np.squeeze(SCREAMING_SNAKE_CASE__ , axis=SCREAMING_SNAKE_CASE__ )
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return array.squeeze() if axis is None else array.squeeze(dim=SCREAMING_SNAKE_CASE__ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
import tensorflow as tf
return tf.squeeze(SCREAMING_SNAKE_CASE__ , axis=SCREAMING_SNAKE_CASE__ )
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return jnp.squeeze(SCREAMING_SNAKE_CASE__ , axis=SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f'Type not supported for squeeze: {type(SCREAMING_SNAKE_CASE__ )}.' )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
if is_numpy_array(SCREAMING_SNAKE_CASE__ ):
return np.expand_dims(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return array.unsqueeze(dim=SCREAMING_SNAKE_CASE__ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
import tensorflow as tf
return tf.expand_dims(SCREAMING_SNAKE_CASE__ , axis=SCREAMING_SNAKE_CASE__ )
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return jnp.expand_dims(SCREAMING_SNAKE_CASE__ , axis=SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f'Type not supported for expand_dims: {type(SCREAMING_SNAKE_CASE__ )}.' )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
if is_numpy_array(SCREAMING_SNAKE_CASE__ ):
return np.size(SCREAMING_SNAKE_CASE__ )
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return array.numel()
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
import tensorflow as tf
return tf.size(SCREAMING_SNAKE_CASE__ )
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return array.size
else:
raise ValueError(f'Type not supported for expand_dims: {type(SCREAMING_SNAKE_CASE__ )}.' )
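# Quick numpy-only sanity sketch for the dispatchers above; the torch, tf and
# jax branches behave analogously (numpy is assumed to be imported as `np` at
# the top of this module).
_arr = np.ones((2, 3))
assert np.transpose(_arr).shape == (3, 2)
assert np.reshape(_arr, (3, 2)).shape == (3, 2)
assert np.squeeze(np.ones((1, 2))).shape == (2,)
assert np.expand_dims(_arr, 0).shape == (1, 2, 3)
assert np.size(_arr) == 6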
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(SCREAMING_SNAKE_CASE__ , (tuple, list) ):
snake_case_ : Dict = [f'{repo_id}--{v}' if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
snake_case_ : Union[str, Any] = f'{repo_id}--{value}'
return auto_map
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
for base_class in inspect.getmro(SCREAMING_SNAKE_CASE__ ):
snake_case_ : Dict = base_class.__module__
snake_case_ : int = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'Could not infer framework from class {model_class}.' )
| 708
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index
            encapsulated by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`].
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """rag"""
_A : Optional[Any] = True
def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
snake_case_ : List[str] = kwargs.pop("""generator""" )
snake_case_ : List[str] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : int = reduce_loss
snake_case_ : Optional[int] = label_smoothing
snake_case_ : Dict = exclude_bos_score
snake_case_ : Union[str, Any] = do_marginalize
snake_case_ : Union[str, Any] = title_sep
snake_case_ : int = doc_sep
snake_case_ : int = n_docs
snake_case_ : List[str] = max_combined_length
snake_case_ : Tuple = dataset
snake_case_ : int = dataset_split
snake_case_ : str = index_name
snake_case_ : List[str] = retrieval_vector_size
snake_case_ : Dict = retrieval_batch_size
snake_case_ : str = passages_path
snake_case_ : Union[str, Any] = index_path
snake_case_ : Tuple = use_dummy_dataset
snake_case_ : Dict = output_retrieved
snake_case_ : str = do_deduplication
snake_case_ : Any = use_cache
if self.forced_eos_token_id is None:
snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )
@classmethod
def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.question_encoder.to_dict()
snake_case_ : Dict = self.generator.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
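# Hedged usage sketch: compose a RAG config from its two sub-configs via the
# upstream `transformers` API that the class above mirrors (the import is an
# assumption; the mangled class in this file is not itself importable).
from transformers import AutoConfig, RagConfig

_question_encoder_cfg = AutoConfig.for_model("dpr")
_generator_cfg = AutoConfig.for_model("bart")
_rag_cfg = RagConfig.from_question_encoder_generator_configs(
    _question_encoder_cfg, _generator_cfg, n_docs=5, max_combined_length=300
)
assert _rag_cfg.n_docs == 5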
| 48
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=False ):
"""simple docstring"""
snake_case_ : str = """backbone.""" if is_semantic else """"""
snake_case_ : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', """beit.embeddings.cls_token"""),
(f'{prefix}patch_embed.proj.weight', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'{prefix}patch_embed.proj.bias', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'{prefix}pos_embed', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : int=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
snake_case_ : Union[str, Any] = """backbone.""" if is_semantic else """"""
# queries, keys and values
snake_case_ : Union[str, Any] = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
snake_case_ : str = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
snake_case_ : Tuple = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
snake_case_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
snake_case_ : List[Any] = q_bias
snake_case_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
snake_case_ : Dict = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
snake_case_ : List[Any] = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
snake_case_ : int = gamma_a
snake_case_ : Any = gamma_a
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : str = dct.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = val
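# Minimal standalone illustration of the pop-and-reinsert renaming performed
# above (hypothetical keys, not the real checkpoint):
_demo_state = {"blocks.0.norm1.weight": 1.0}
_old, _new = "blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight"
_demo_state[_new] = _demo_state.pop(_old)
assert _new in _demo_state and _old not in _demo_state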
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : int = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ):
"""simple docstring"""
snake_case_ : Optional[Any] = False if """rvlcdip""" in checkpoint_url else True
snake_case_ : Optional[int] = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE__ , use_mask_token=SCREAMING_SNAKE_CASE__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
snake_case_ : Any = 1_0_2_4
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : Any = 2_4
snake_case_ : Optional[int] = 1_6
# labels
if "rvlcdip" in checkpoint_url:
snake_case_ : List[Any] = 1_6
snake_case_ : Tuple = """huggingface/label-files"""
snake_case_ : Tuple = """rvlcdip-id2label.json"""
snake_case_ : List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
        snake_case_ : str = {int(k): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
snake_case_ : Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""model"""]
snake_case_ : int = create_rename_keys(SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
snake_case_ : List[Any] = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE__ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# Check outputs on an image
snake_case_ : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = prepare_img()
snake_case_ : Dict = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
snake_case_ : Optional[int] = encoding["""pixel_values"""]
snake_case_ : int = model(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = outputs.logits
# verify logits
snake_case_ : Any = [1, 1_6] if """rvlcdip""" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE__ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
if has_lm_head:
snake_case_ : int = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
snake_case_ : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
a_ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 709
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Tuple = backbone_config.get("""model_type""" )
snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
snake_case_ : List[Any] = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = initializer_range
snake_case_ : str = pool_scales
snake_case_ : Dict = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : List[str] = auxiliary_in_channels
snake_case_ : Optional[Any] = auxiliary_channels
snake_case_ : Any = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : List[str] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : Any = self.__class__.model_type
return output
| 48
| 0
|
"""simple docstring"""
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : List[str] = []
snake_case_ : Dict = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
snake_case_ : Union[str, Any] = subprocess.run(SCREAMING_SNAKE_CASE__ , shell=SCREAMING_SNAKE_CASE__ , stdout=subprocess.PIPE )
snake_case_ : Tuple = output.stdout.decode("""utf-8""" )
snake_case_ : Dict = json.loads(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(SCREAMING_SNAKE_CASE__ )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" , """w""" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
snake_case_ : Optional[int] = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f'The following runners are offline:\n{failed}' )
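# Standalone sketch of the status filtering above (hypothetical payload; the
# real one comes from the GitHub Actions runners API):
_payload = {"runners": [{"name": "gpu-1", "status": "offline"}, {"name": "gpu-2", "status": "online"}]}
assert [r["name"] for r in _payload["runners"] if r["status"] == "offline"] == ["gpu-1"]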
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return values.split(""",""" )
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
a_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 710
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a_ = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__=-1 ):
        # in NER datasets, the last column is usually reserved for the NER label
snake_case_ : Union[str, Any] = label_idx
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[str] = mode.value
snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : Any = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
snake_case_ : str = []
snake_case_ : List[Any] = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
snake_case_ : Optional[Any] = []
snake_case_ : int = []
else:
snake_case_ : Optional[Any] = line.split(""" """ )
words.append(splits[0] )
if len(lowercase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : str = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(lowercase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(lowercase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Dict = f.read().splitlines()
if "O" not in labels:
snake_case_ : List[Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self ):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
snake_case_ : Any = f.read().splitlines()
if "O" not in labels:
snake_case_ : Tuple = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[Any] = mode.value
snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' )
snake_case_ : Tuple = 1
snake_case_ : str = []
with open(lowercase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(lowercase__ ):
snake_case_ : Tuple = []
snake_case_ : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(lowercase__ ) == len(lowercase__ )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
return examples
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = 0
for sentence in parse_incr(lowercase__ ):
snake_case_ : int = preds_list[example_id]
snake_case_ : Dict = """"""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowercase__ )
example_id += 1
def __UpperCamelCase (self , lowercase__ ):
if path:
with open(lowercase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 48
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 711
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Union[str, Any] = num - 1
snake_case_ : List[str] = 0
while s % 2 == 0:
snake_case_ : str = s // 2
t += 1
for _ in range(5 ):
snake_case_ : List[Any] = random.randrange(2 , num - 1 )
snake_case_ : Dict = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if v != 1:
snake_case_ : int = 0
while v != (num - 1):
if i == t - 1:
return False
else:
snake_case_ : str = i + 1
snake_case_ : int = (v**2) % num
return True
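# Deterministic-base sketch of the Miller-Rabin round implemented above
# (standalone, hypothetical name; the function above draws random bases):
def _miller_rabin_demo(num: int, bases=(2, 3, 5, 7, 11)) -> bool:
    if num < 2:
        return False
    if num in bases:
        return True
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for a in bases:
        v = pow(a, s, num)
        if v in (1, num - 1):
            continue
        for _ in range(t - 1):
            v = (v * v) % num
            if v == num - 1:
                break
        else:
            return False  # `a` witnesses that `num` is composite
    return True

assert _miller_rabin_demo(97) and not _miller_rabin_demo(91)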
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if num < 2:
return False
snake_case_ : Dict = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ):
"""simple docstring"""
while True:
snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(SCREAMING_SNAKE_CASE__ ):
return num
if __name__ == "__main__":
a_ = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 48
| 0
|
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Dict = AlbertConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(f'Building PyTorch model from configuration: {config}' )
snake_case_ : List[str] = AlbertForPreTraining(SCREAMING_SNAKE_CASE__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 712
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = """deberta-v2"""
def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Optional[int] = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = relative_attention
snake_case_ : Dict = max_relative_positions
snake_case_ : Optional[int] = pad_token_id
snake_case_ : List[str] = position_biased_input
# Backwards compatibility
if type(lowercase__ ) == str:
snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )]
snake_case_ : Optional[int] = pos_att_type
snake_case_ : List[str] = vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ )
snake_case_ : List[str] = pooler_dropout
snake_case_ : int = pooler_hidden_act
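# Sanity sketch of the backwards-compatible parsing above: a pipe-separated
# string of attention types becomes a lowercase list.
assert [x.strip() for x in "P2C|C2P".lower().split("|")] == ["p2c", "c2p"]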
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@property
def __UpperCamelCase (self ):
if self.task == "multiple-choice":
snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : int = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def __UpperCamelCase (self ):
return 12
def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ):
snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 48
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : int = CpmAntTokenizer
_A : List[Any] = False
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : Optional[int] = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
snake_case_ : str = """今天天气真好!"""
snake_case_ : Dict = ["""今天""", """天气""", """真""", """好""", """!"""]
snake_case_ : Any = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : Dict = """今天天气真好!"""
snake_case_ : Union[str, Any] = [tokenizer.bos_token] + tokens
snake_case_ : Union[str, Any] = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 713
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
    # XOR is negative exactly when the two operands' sign bits differ
    return numa ^ numb < 0
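# Illustration of the property used above: the XOR of two ints is negative
# exactly when their sign bits differ.
assert (3 ^ -5) < 0
assert not (3 ^ 5) < 0
assert not (-3 ^ -5) < 0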
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Tuple = int(SCREAMING_SNAKE_CASE__ )
if decimal in (0, 1): # Exit cases for the recursion
return str(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = divmod(SCREAMING_SNAKE_CASE__ , 2 )
return binary_recursive(SCREAMING_SNAKE_CASE__ ) + str(SCREAMING_SNAKE_CASE__ )
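# Standalone sketch of the divmod recursion above (hypothetical name): each
# step peels off the lowest bit and prepends the bits of the quotient.
def _to_binary_demo(n: int) -> str:
    if n in (0, 1):
        return str(n)
    q, r = divmod(n, 2)
    return _to_binary_demo(q) + str(r)

assert _to_binary_demo(10) == "1010"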
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Optional[int] = str(SCREAMING_SNAKE_CASE__ ).strip()
if not number:
raise ValueError("""No input value was provided""" )
snake_case_ : str = """-""" if number.startswith("""-""" ) else """"""
snake_case_ : Dict = number.lstrip("""-""" )
if not number.isnumeric():
raise ValueError("""Input value is not an integer""" )
return f'{negative}0b{binary_recursive(int(SCREAMING_SNAKE_CASE__ ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
| 714
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
a_ = '''>>zh<<'''
a_ = '''Helsinki-NLP/'''
if is_torch_available():
a_ = '''pt'''
elif is_tf_available():
a_ = '''tf'''
else:
a_ = '''jax'''
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : str = MarianTokenizer
_A : List[str] = False
_A : List[str] = True
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : Any = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase (self , **lowercase__ ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = """</s>"""
snake_case_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def __UpperCamelCase (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
snake_case_ : Tuple = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : List[str] = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __UpperCamelCase (self ):
# fmt: off
snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def __UpperCamelCase (self ):
snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
snake_case_ : Dict = """Tämä on testi"""
snake_case_ : List[Any] = """This is a test"""
snake_case_ : Optional[int] = [76, 7, 20_47, 2]
snake_case_ : List[str] = [69, 12, 11, 9_40, 2]
snake_case_ : Any = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 48
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 5_0 ):
"""simple docstring"""
snake_case_ : Optional[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
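# Standalone sanity sketch (hypothetical name, my reading of the DP above):
# each column counts tilings that use tiles of a single length, so for a row
# of length 5 the total is 12, matching Project Euler 116's worked example.
def _single_length_tilings(length: int, tile_length: int) -> int:
    ways = [0] * (length + 1)
    for row in range(length + 1):
        for start in range(row - tile_length + 1):
            ways[row] += ways[row - start - tile_length] + 1
    return ways[length]

assert sum(_single_length_tilings(5, t) for t in (2, 3, 4)) == 12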
if __name__ == "__main__":
print(F'''{solution() = }''')
| 715
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_A : ClassVar[Features] = Features({"""audio""": Audio()})
_A : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_A : str = "audio"
_A : str = "transcription"
def __UpperCamelCase (self , lowercase__ ):
if self.audio_column not in features:
raise ValueError(f'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , lowercase__ ):
raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
snake_case_ : Optional[int] = copy.deepcopy(self )
snake_case_ : Tuple = self.input_schema.copy()
snake_case_ : List[str] = features[self.audio_column]
snake_case_ : Any = input_schema
return task_template
@property
def __UpperCamelCase (self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 48
| 0
|
"""simple docstring"""
import numpy as np
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None ):
self.set_matricies(red=lowercase__ , green=lowercase__ , blue=lowercase__ , red_edge=lowercase__ , nir=lowercase__ )
def __UpperCamelCase (self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None ):
if red is not None:
snake_case_ : int = red
if green is not None:
snake_case_ : List[Any] = green
if blue is not None:
snake_case_ : Tuple = blue
if red_edge is not None:
snake_case_ : Any = red_edge
if nir is not None:
snake_case_ : Optional[int] = nir
return True
def __UpperCamelCase (self , lowercase__="" , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None ):
self.set_matricies(red=lowercase__ , green=lowercase__ , blue=lowercase__ , red_edge=lowercase__ , nir=lowercase__ )
snake_case_ : Union[str, Any] = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def __UpperCamelCase (self ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __UpperCamelCase (self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __UpperCamelCase (self ):
return self.nir * (self.red / (self.green**2))
def __UpperCamelCase (self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __UpperCamelCase (self ):
return (self.nir - self.red) / (self.nir + self.red)
def __UpperCamelCase (self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def __UpperCamelCase (self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __UpperCamelCase (self ):
return (self.nir - self.green) / (self.nir + self.green)
def __UpperCamelCase (self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __UpperCamelCase (self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __UpperCamelCase (self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __UpperCamelCase (self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __UpperCamelCase (self , lowercase__=0.08 , lowercase__=1.22 , lowercase__=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __UpperCamelCase (self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __UpperCamelCase (self ):
return (self.nir / self.green) - 1
def __UpperCamelCase (self ):
return (self.nir / self.redEdge) - 1
def __UpperCamelCase (self ):
return (self.red - self.blue) / self.red
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __UpperCamelCase (self ):
return self.nir - self.green
def __UpperCamelCase (self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __UpperCamelCase (self ):
snake_case_ : List[Any] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def __UpperCamelCase (self , lowercase__=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def __UpperCamelCase (self , lowercase__=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __UpperCamelCase (self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def __UpperCamelCase (self , lowercase__=None , lowercase__=None ):
return (self.nir - b) / (a * self.red)
def __UpperCamelCase (self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __UpperCamelCase (self ):
return (self.red + self.green + self.blue) / 30.5
def __UpperCamelCase (self ):
return self.nir / self.red
def __UpperCamelCase (self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def __UpperCamelCase (self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __UpperCamelCase (self ):
return self.green / (self.nir + self.red + self.green)
def __UpperCamelCase (self ):
return self.nir / (self.nir + self.red + self.green)
def __UpperCamelCase (self ):
return self.red / (self.nir + self.red + self.green)
def __UpperCamelCase (self ):
return (self.green - self.red) / (self.green + self.red)
def __UpperCamelCase (self ):
return (self.red - self.green) / (self.red + self.green)
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
snake_case_ : Optional[Any] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def __UpperCamelCase (self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __UpperCamelCase (self ):
return self.nir / self.red
def __UpperCamelCase (self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def __UpperCamelCase (self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
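# Standalone NDVI sanity sketch (illustrative reflectance values, independent
# of the class above; `np` is imported at the top of this file):
_red_demo = np.array([0.2, 0.3])
_nir_demo = np.array([0.8, 0.6])
assert np.allclose((_nir_demo - _red_demo) / (_nir_demo + _red_demo), [0.6, 1 / 3])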
| 716
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = ["""pixel_values"""]
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : Union[str, Any] = do_resize
snake_case_ : List[str] = size
snake_case_ : str = crop_pct
snake_case_ : str = resample
snake_case_ : Optional[Any] = do_center_crop
snake_case_ : Dict = crop_size
snake_case_ : int = do_rescale
snake_case_ : Optional[int] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
snake_case_ : Dict = int(size["""height"""] / crop_pct )
else:
snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
else:
if "shortest_edge" in size:
snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
elif "height" in size and "width" in size:
snake_case_ : int = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
snake_case_ : int = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ : List[Any] = resample if resample is not None else self.resample
snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : int = image_std if image_std is not None else self.image_std
snake_case_ : List[Any] = size if size is not None else self.size
snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
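# Worked example of the crop_pct resize rule above (illustrative values):
# with size = {"shortest_edge": 224} and crop_pct = 0.9, the short side is
# first resized to int(224 / 0.9) = 248 px, then the image is center-cropped
# back to 224x224, the usual timm-style evaluation recipe.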
| 48
| 0
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
a_ = importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
a_ = spec.loader.load_module()
a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a_ = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
a_ = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
snake_case_ : List[str] = False
# source code of `config_class`
snake_case_ : Dict = inspect.getsource(SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = _re_checkpoint.findall(SCREAMING_SNAKE_CASE__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
snake_case_ : int = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : List[Any] = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
snake_case_ : Optional[Any] = True
break
snake_case_ : int = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
snake_case_ : str = """\n""".join(sorted(SCREAMING_SNAKE_CASE__ ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
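# Minimal standalone demo of the checkpoint regex above (raw-string form of
# the same pattern; shown as a comment so it does not run at import time):
#
#     import re
#     pat = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
#     pat.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#     # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]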
| 717
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = VOCAB_FILES_NAMES
_A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_A : str = ["""input_ids""", """attention_mask"""]
_A : Tuple = MBartTokenizer
_A : List[int] = []
_A : List[int] = []
def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
# The mask token behaves like a normal word, i.e. it includes the space before it
snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
super().__init__(
vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
snake_case_ : Dict = vocab_file
snake_case_ : Optional[int] = bool(self.vocab_file)
snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case_ : Any = {
lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX"""
snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
snake_case_ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase (self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : List[Any] = [self.sep_token_id]
snake_case_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ : int = src_lang
snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Union[str, Any] = tgt_lang_id
return inputs
def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
snake_case_ : List[str] = src_lang
snake_case_ : int = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def __UpperCamelCase (self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase (self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Tuple = []
snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Optional[int] = []
snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
snake_case_ : List[str] = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
copyfile(self.vocab_file , lowercase__ )
return (out_vocab_file,)
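# Hedged usage sketch (checkpoint name taken from the map above; the exact
# token ids are illustrative, not guaranteed):
#
#     tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#     tok.src_lang = "en_XX"   # triggers set_src_lang_special_tokens
#     batch = tok("Hello world", return_tensors="pt")
#     # with an empty prefix, input_ids end with [..., eos_token_id, <en_XX id>]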
| 48
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = VOCAB_FILES_NAMES
_A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_A : str = ["""input_ids""", """attention_mask"""]
_A : Tuple = MBartTokenizer
_A : List[int] = []
_A : List[int] = []
def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
super().__init__(
vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
snake_case_ : Dict = vocab_file
snake_case_ : Optional[int] = False if not self.vocab_file else True
snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case_ : Any = {
lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX"""
snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
snake_case_ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase (self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : List[Any] = [self.sep_token_id]
snake_case_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ : int = src_lang
snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Union[str, Any] = tgt_lang_id
return inputs
def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
snake_case_ : List[str] = src_lang
snake_case_ : int = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def __UpperCamelCase (self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase (self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Tuple = []
snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Optional[int] = []
snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
snake_case_ : List[str] = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
copyfile(self.vocab_file , lowercase__ )
return (out_vocab_file,)
| 718
|
"""simple docstring"""
import argparse
import hashlib  # hashlib is only used inside the test helper below
import struct
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ ):
snake_case_ : Union[str, Any] = data
snake_case_ : List[str] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0]
@staticmethod
def __UpperCamelCase (lowercase__ , lowercase__ ):
return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f
def __UpperCamelCase (self ):
snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def __UpperCamelCase (self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64
for i in range(16 , 80 ):
snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.padding()
snake_case_ : Any = self.split_blocks()
for block in self.blocks:
snake_case_ : Any = self.expand_block(lowercase__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
snake_case_ : Optional[Any] = (b & c) | ((~b) & d)
snake_case_ : List[str] = 0X5_a_8_2_7_9_9_9
elif 20 <= i < 40:
snake_case_ : Union[str, Any] = b ^ c ^ d
snake_case_ : Tuple = 0X6_e_d_9_e_b_a_1
elif 40 <= i < 60:
snake_case_ : str = (b & c) | (b & d) | (c & d)
snake_case_ : List[str] = 0X8_f_1_b_b_c_d_c
elif 60 <= i < 80:
snake_case_ : Tuple = b ^ c ^ d
snake_case_ : str = 0Xc_a_6_2_c_1_d_6
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = (
self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f,
a,
self.rotate(lowercase__ , 30 ),
c,
d,
)
snake_case_ : Any = (
self.h[0] + a & 0Xf_f_f_f_f_f_f_f,
self.h[1] + b & 0Xf_f_f_f_f_f_f_f,
self.h[2] + c & 0Xf_f_f_f_f_f_f_f,
self.h[3] + d & 0Xf_f_f_f_f_f_f_f,
self.h[4] + e & 0Xf_f_f_f_f_f_f_f,
)
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Union[str, Any] = b"""Test String"""
assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
snake_case_ : Optional[int] = parser.parse_args()
snake_case_ : Optional[int] = args.input_string
# In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
snake_case_ : List[str] = f.read()
else:
snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" )
print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
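# Self-contained sketch of the padding rule implemented in padding() above
# (uses the struct/hashlib imports at the top of this module; the helper
# name below is illustrative, not part of the original API).
def _sha1_pad_sketch(data: bytes) -> bytes:
    pad = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    return data + pad + struct.pack(">Q", 8 * len(data))

# The padded message is always a whole number of 64-byte blocks.
assert len(_sha1_pad_sketch(b"Test String")) % 64 == 0
# Known-answer test vector for SHA-1 from FIPS 180 ("abc").
assert hashlib.sha1(b"abc").hexdigest() == "a9993e364706816aba3e25717850c26c9cd0d89d"  # noqa: S324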
| 48
| 0
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = False , lowercase__ = None , lowercase__ = True , lowercase__ = "arrow" , **lowercase__ , ):
super().__init__(
split=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ , streaming=lowercase__ , **lowercase__ , )
snake_case_ : Dict = load_from_cache_file
snake_case_ : List[str] = file_format
snake_case_ : Optional[Any] = Spark(
df=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , working_dir=lowercase__ , **lowercase__ , )
def __UpperCamelCase (self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
snake_case_ : List[Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowercase__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
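# Illustrative entry point, assuming a live SparkSession and the public
# `datasets` API: `Dataset.from_spark(df)` routes through this reader; with
# streaming=True it yields an iterable dataset instead of first
# materializing the DataFrame into the cache directory.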
| 719
|
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
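# Illustrative rendering note (the CLI invocation is an assumption about the
# local manim install): `manim -ql this_file.py <SceneName>` renders the
# scene; the rectangles stand in for memory cells, and MoveToTarget animates
# checkpoint shards from the loaded checkpoint into the empty CPU slots.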
| 48
| 0
|
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase):
"""simple docstring"""
@register_to_config
def __init__(self , lowercase__ , lowercase__ = None , lowercase__ = None ):
super().__init__()
snake_case_ : Tuple = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
snake_case_ : Any = torch.zeros(lowercase__ , lowercase__ )
else:
snake_case_ : Optional[int] = None
snake_case_ : Dict = torch.nn.Parameter(lowercase__ )
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : VQModel
_A : CLIPTextModel
_A : CLIPTokenizer
_A : TransformeraDModel
_A : LearnedClassifierFreeSamplingEmbeddings
_A : VQDiffusionScheduler
def __init__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
vqvae=lowercase__ , transformer=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , scheduler=lowercase__ , learned_classifier_free_sampling_embeddings=lowercase__ , )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = len(lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else 1
# get prompt text embeddings
snake_case_ : Union[str, Any] = self.tokenizer(
lowercase__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case_ : Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
snake_case_ : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case_ : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
snake_case_ : List[str] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowercase__ )
# duplicate text embeddings for each generation per prompt
snake_case_ : Tuple = prompt_embeds.repeat_interleave(lowercase__ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
snake_case_ : Any = self.learned_classifier_free_sampling_embeddings.embeddings
snake_case_ : Union[str, Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(lowercase__ , 1 , 1 )
else:
snake_case_ : Tuple = [""""""] * batch_size
snake_case_ : Dict = text_input_ids.shape[-1]
snake_case_ : Tuple = self.tokenizer(
lowercase__ , padding="""max_length""" , max_length=lowercase__ , truncation=lowercase__ , return_tensors="""pt""" , )
snake_case_ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
snake_case_ : int = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowercase__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ : Optional[Any] = negative_prompt_embeds.shape[1]
snake_case_ : Optional[int] = negative_prompt_embeds.repeat(1 , lowercase__ , 1 )
snake_case_ : int = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : Optional[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__(self , lowercase__ , lowercase__ = 1_00 , lowercase__ = 5.0 , lowercase__ = 1.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , lowercase__ = None , lowercase__ = 1 , ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : List[Any] = 1
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Optional[int] = len(lowercase__ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(lowercase__ )}' )
snake_case_ : int = batch_size * num_images_per_prompt
snake_case_ : Tuple = guidance_scale > 1.0
snake_case_ : List[Any] = self._encode_prompt(lowercase__ , lowercase__ , lowercase__ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase__ , lowercase__ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(lowercase__ )}.' )
# get the initial completely masked latents unless the user supplied it
snake_case_ : int = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
snake_case_ : Dict = self.transformer.num_vector_embeds - 1
snake_case_ : Optional[int] = torch.full(lowercase__ , lowercase__ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
snake_case_ : Tuple = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowercase__ , device=self.device )
snake_case_ : Optional[Any] = self.scheduler.timesteps.to(self.device )
snake_case_ : List[str] = latents
for i, t in enumerate(self.progress_bar(lowercase__ ) ):
# expand the sample if we are doing classifier free guidance
snake_case_ : Any = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
snake_case_ : int = self.transformer(lowercase__ , encoder_hidden_states=lowercase__ , timestep=lowercase__ ).sample
if do_classifier_free_guidance:
snake_case_ : Union[str, Any] = model_output.chunk(2 )
snake_case_ : Optional[int] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowercase__ , dim=1 , keepdim=lowercase__ )
snake_case_ : str = self.truncate(lowercase__ , lowercase__ )
# remove `log(0)`'s (`-inf`s)
snake_case_ : Union[str, Any] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : Dict = self.scheduler.step(lowercase__ , timestep=lowercase__ , sample=lowercase__ , generator=lowercase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase__ , lowercase__ , lowercase__ )
snake_case_ : Dict = self.vqvae.config.vq_embed_dim
snake_case_ : Optional[int] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
snake_case_ : List[Any] = self.vqvae.quantize.get_codebook_entry(lowercase__ , shape=lowercase__ )
snake_case_ : Dict = self.vqvae.decode(lowercase__ , force_not_quantize=lowercase__ ).sample
snake_case_ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Tuple = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : List[Any] = torch.sort(lowercase__ , 1 , descending=lowercase__ )
snake_case_ : Optional[Any] = torch.exp(lowercase__ )
snake_case_ : int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
snake_case_ : Optional[int] = torch.full_like(keep_mask[:, 0:1, :] , lowercase__ )
snake_case_ : int = torch.cat((all_true, keep_mask) , dim=1 )
snake_case_ : List[Any] = keep_mask[:, :-1, :]
snake_case_ : List[Any] = keep_mask.gather(1 , indices.argsort(1 ) )
snake_case_ : Optional[int] = log_p_x_0.clone()
snake_case_ : Dict = -torch.inf # -inf = log(0)
return rv
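# Hedged restatement of the guidance step in __call__ above, written out for
# clarity (tensor names here are illustrative):
#
#     log_p_uncond, log_p_text = model_output.chunk(2)
#     guided = log_p_uncond + guidance_scale * (log_p_text - log_p_uncond)
#     guided = guided - torch.logsumexp(guided, dim=1, keepdim=True)
#
# i.e. classifier-free guidance applied to log-probabilities, renormalized
# with logsumexp so each latent's distribution still sums to one.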
| 720
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
if start < end:
snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = a[end]
snake_case_ : Dict = a[pivot]
snake_case_ : Any = temp
snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ )
return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 100 # 1000 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution '''
'''is :'''
)
print(z)
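# Minimal standalone sketch of the Lomuto partition used by
# _in_place_partition above (random pivot selection omitted; the name below
# is illustrative, not the module's API).
def _lomuto_partition_sketch(a, start, end):
    count, boundary = 0, start - 1
    for index in range(start, end):
        count += 1  # one comparison per scanned element
        if a[index] < a[end]:  # a[end] holds the pivot
            boundary += 1
            a[boundary], a[index] = a[index], a[boundary]
    a[boundary + 1], a[end] = a[end], a[boundary + 1]  # place the pivot
    return boundary + 1, count

assert _lomuto_partition_sketch([3, 1, 2], 0, 2) == (1, 2)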
| 48
| 0
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a_ = 6378137.0
a_ = 6356752.314245
a_ = 6378137
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
"""simple docstring"""
snake_case_ : Dict = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
snake_case_ : Union[str, Any] = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
snake_case_ : List[Any] = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
snake_case_ : Union[str, Any] = haversine_distance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
snake_case_ : Union[str, Any] = (b_lata + b_lata) / 2
snake_case_ : Optional[int] = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma / 2)
snake_case_ : int = (sin(SCREAMING_SNAKE_CASE__ ) ** 2) * (cos(SCREAMING_SNAKE_CASE__ ) ** 2)
snake_case_ : Optional[Any] = cos(sigma / 2 ) ** 2
snake_case_ : Optional[Any] = (sigma - sin(SCREAMING_SNAKE_CASE__ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma / 2)
snake_case_ : Optional[int] = (cos(SCREAMING_SNAKE_CASE__ ) ** 2) * (sin(SCREAMING_SNAKE_CASE__ ) ** 2)
snake_case_ : List[str] = sin(sigma / 2 ) ** 2
snake_case_ : Optional[Any] = (sigma + sin(SCREAMING_SNAKE_CASE__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
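# Self-contained sketch (WGS-84 constants as above) of the
# parametric-latitude reduction used in this function:
#
#     from math import atan, radians, tan
#     FLATTENING = (6378137.0 - 6356752.314245) / 6378137.0
#     def parametric_latitude(lat_deg: float) -> float:
#         return atan((1 - FLATTENING) * tan(radians(lat_deg)))
#
# Reduced latitudes shrink slightly toward the equator, which is why the
# formula multiplies tan(latitude) by (1 - flattening) before the arctangent.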
| 721
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ):
"""simple docstring"""
snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE__ )
# if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes, add an edge from i to j
# if the randomly generated number is smaller than probability
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE__ )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE__ )
return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return {
i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
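# Quick sanity sketch (illustrative numbers, standalone): an undirected
# G(n, p) graph built this way has p * n * (n - 1) / 2 expected edges, so
# n = 10 and p = 0.5 gives about 22.5 edges on average.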
| 48
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : List[str] = KandinskyInpaintPipeline
_A : List[str] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_A : List[Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_A : Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_A : Tuple = False
@property
def __UpperCamelCase (self ):
return 32
@property
def __UpperCamelCase (self ):
return 32
@property
def __UpperCamelCase (self ):
return self.time_input_dim
@property
def __UpperCamelCase (self ):
return self.time_input_dim * 4
@property
def __UpperCamelCase (self ):
return 1_00
@property
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __UpperCamelCase (self ):
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
snake_case_ : List[Any] = MultilingualCLIP(lowercase__ )
snake_case_ : Tuple = text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase (self ):
torch.manual_seed(0 )
snake_case_ : Dict = {
"""in_channels""": 9,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case_ : Union[str, Any] = UNetaDConditionModel(**lowercase__ )
return model
@property
def __UpperCamelCase (self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase (self ):
torch.manual_seed(0 )
snake_case_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : Optional[Any] = self.dummy_unet
snake_case_ : int = self.dummy_movq
snake_case_ : Optional[int] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowercase__ , )
snake_case_ : Any = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __UpperCamelCase (self , lowercase__ , lowercase__=0 ):
snake_case_ : Any = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
snake_case_ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase__ )
# create init_image
snake_case_ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
snake_case_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Dict = Image.fromarray(np.uinta(lowercase__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
snake_case_ : Any = np.ones((64, 64) , dtype=np.floataa )
snake_case_ : List[Any] = 0
if str(lowercase__ ).startswith("""mps""" ):
snake_case_ : Any = torch.manual_seed(lowercase__ )
else:
snake_case_ : Dict = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
snake_case_ : Union[str, Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = """cpu"""
snake_case_ : Dict = self.get_dummy_components()
snake_case_ : Any = self.pipeline_class(**lowercase__ )
snake_case_ : Any = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Union[str, Any] = pipe(**self.get_dummy_inputs(lowercase__ ) )
snake_case_ : List[Any] = output.images
snake_case_ : Any = pipe(
**self.get_dummy_inputs(lowercase__ ) , return_dict=lowercase__ , )[0]
snake_case_ : List[str] = image[0, -3:, -3:, -1]
snake_case_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
snake_case_ : Dict = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def __UpperCamelCase (self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __UpperCamelCase (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase (self ):
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
snake_case_ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case_ : int = np.ones((7_68, 7_68) , dtype=np.floataa )
snake_case_ : int = 0
snake_case_ : str = """a hat"""
snake_case_ : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowercase__ )
snake_case_ : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
snake_case_ : List[str] = pipeline.to(lowercase__ )
pipeline.set_progress_bar_config(disable=lowercase__ )
snake_case_ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : Union[str, Any] = pipe_prior(
lowercase__ , generator=lowercase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case_ : Optional[int] = pipeline(
lowercase__ , image=lowercase__ , mask_image=lowercase__ , image_embeds=lowercase__ , negative_image_embeds=lowercase__ , generator=lowercase__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
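# Hedged note on the slow test above: the prior supplies the
# (image_embeds, negative_image_embeds) tuple, and the inpaint pipeline also
# takes an RGB image plus a float mask of matching spatial size; the mask's
# repaint polarity has changed across diffusers releases, so treat it as
# version-specific rather than a fixed convention.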
| 700
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """dpr"""
def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : Dict = intermediate_size
snake_case_ : int = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : List[str] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : Union[str, Any] = projection_dim
snake_case_ : str = position_embedding_type
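# These defaults mirror BERT-base (12 layers, hidden size 768, vocab 30522);
# projection_dim = 0 is conventionally read as "no projection layer", so the
# encoder's pooled output is used directly (an assumption about the matching
# model code, which is not shown here).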
| 48
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[int] ):
"""simple docstring"""
snake_case_ : Dict = len(SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if numbers[j] < numbers[i]:
snake_case_ : List[Any] = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a_ = input('''Enter numbers separated by a comma:\n''').strip()
a_ = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
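# Standalone reference version (illustrative name) making the swap explicit,
# since the compare-and-swap pair is the whole algorithm:
def exchange_sort_sketch(numbers: list) -> list:
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers

assert exchange_sort_sketch([3, 1, 2]) == [1, 2, 3]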
| 701
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a_ = 10
a_ = 256
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS:
return None
snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ )
for token in set(SCREAMING_SNAKE_CASE__ ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0}
class __lowercase :
"""simple docstring"""
    def __init__(self , *, lowercase__ = 0.85 , ):
snake_case_ : Tuple = duplication_jaccard_threshold
snake_case_ : Optional[Any] = NUM_PERM
snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
snake_case_ : List[Any] = defaultdict(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : int = self._index.query(lowercase__ )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(lowercase__ , lowercase__ )
if len(lowercase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = []
for base, duplicates in self._duplicate_clusters.items():
snake_case_ : Optional[Any] = [base] + list(lowercase__ )
# reformat the cluster to be a list of dict
snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowercase__ )
return duplicate_clusters
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.get_duplicate_clusters()
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ , snake_case_ : str = element
snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ):
"""simple docstring"""
snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ):
di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ )
    return len(tokensa & tokensb ) / len(tokensa | tokensb )
a_ = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
for elementa in cluster:
snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
snake_case_ : Union[str, Any] = 1
extremes.append(SCREAMING_SNAKE_CASE__ )
return extremes
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
global _shared_dataset
snake_case_ : str = dataset
snake_case_ : int = []
snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ):
extremes_list.append(SCREAMING_SNAKE_CASE__ )
return extremes_list
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ):
"""simple docstring"""
snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
snake_case_ : str = {}
snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extremes in extremes_clusters:
for element in extremes:
snake_case_ : int = element
snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
snake_case_ : List[Any] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
return ds_filter, duplicate_clusters
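# A small usage sketch of the datasketch primitives this module builds on; the key and
# token sets are made up, while threshold and num_perm mirror the constants above:
from datasketch import MinHash, MinHashLSH

def min_hash_of(tokens, num_perm=256):
    mh = MinHash(num_perm=num_perm)
    for token in set(tokens):
        mh.update(token.encode())
    return mh

lsh = MinHashLSH(threshold=0.85, num_perm=256)
lsh.insert("doc_a", min_hash_of(["def", "foo", "return", "x"]))
print(lsh.query(min_hash_of(["def", "foo", "return", "x"])))  # ['doc_a']: identical content collides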
| 48
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : List[Any] = """fnet"""
def __init__(self , lowercase__=3_20_00 , lowercase__=7_68 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=5_12 , lowercase__=4 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=False , lowercase__=5_12 , lowercase__=3 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : str = max_position_embeddings
snake_case_ : List[Any] = hidden_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : List[Any] = hidden_dropout_prob
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = type_vocab_size
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : Dict = use_tpu_fourier_optimizations
snake_case_ : Tuple = tpu_short_seq_length
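# A hedged usage sketch, assuming the class above corresponds to transformers' FNetConfig:
from transformers import FNetConfig, FNetModel

config = FNetConfig()       # fnet-base-style defaults (vocab_size=32000, 12 layers)
model = FNetModel(config)   # randomly initialized; FNet mixes tokens via Fourier transforms
print(config.tpu_short_seq_length)  # 512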
| 702
|
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
a_ = logging.getLogger(__name__)
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
a_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
a_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
a_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
a_ = [0] * args.vocab_size
for k, v in counter.items():
a_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
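# The counting loop above in miniature, with a toy token-id dataset standing in for the
# pickled dump file:
from collections import Counter

data = [[5, 7, 7, 2], [7, 2, 2]]  # two "sequences" of token ids
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)
counts = [0] * 10                  # vocab_size = 10 for the toy example
for k, v in counter.items():
    counts[k] = v
print(counts)  # [0, 0, 3, 0, 0, 1, 0, 3, 0, 0]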
| 48
| 0
|
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
a_ = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class __lowercase :
"""simple docstring"""
_A : str
_A : Optional[str] = None
_A : Optional[Union[str, int]] = None
_A : Optional[Union[str, int]] = None
_A : Optional[Union[str, int]] = None
def __UpperCamelCase (self ):
snake_case_ : int = _str_to_version_tuple(self.version_str )
def __repr__(self ):
return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def __UpperCamelCase (self ):
return self.major, self.minor, self.patch
def __UpperCamelCase (self , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
return Version(lowercase__ )
elif isinstance(lowercase__ , lowercase__ ):
return other
raise TypeError(f'{other} (type {type(lowercase__ )}) cannot be compared to version.' )
def __eq__(self , lowercase__ ):
try:
snake_case_ : List[Any] = self._validate_operand(lowercase__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__(self , lowercase__ ):
snake_case_ : Optional[Any] = self._validate_operand(lowercase__ )
return self.tuple < other.tuple
def __hash__(self ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def __UpperCamelCase (cls , lowercase__ ):
snake_case_ : Dict = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def __UpperCamelCase (self ):
return self.version_str
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : List[str] = _VERSION_REG.match(SCREAMING_SNAKE_CASE__ )
if not res:
raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(SCREAMING_SNAKE_CASE__ ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
return ".".join(str(SCREAMING_SNAKE_CASE__ ) for v in version_tuple )
| 703
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = tmp_path / """cache"""
snake_case_ : Optional[int] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : int = {"""text""": """string"""}
snake_case_ : Any = features.copy() if features else default_expected_features
snake_case_ : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Optional[Any] = {"""text""": """string"""}
snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : List[str] = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ : str = [text_path]
snake_case_ : List[str] = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
snake_case_ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : int = tmp_path / """cache"""
snake_case_ : List[str] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Tuple = tmp_path / """cache"""
    # by default the text reader yields a single "text" column of dtype "string"
snake_case_ : List[str] = {"""text""": """string"""}
snake_case_ : int = features.copy() if features else default_expected_features
snake_case_ : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
if split:
snake_case_ : Union[str, Any] = {split: text_path}
else:
snake_case_ : Union[str, Any] = """train"""
snake_case_ : int = {"""train""": text_path, """test""": text_path}
snake_case_ : List[Any] = tmp_path / """cache"""
snake_case_ : Tuple = {"""text""": """string"""}
snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
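# The public API these tests exercise; assumes a plain-text file with one example per
# line at the (hypothetical) path below:
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_texts.txt"}, split="train")
print(ds.column_names)  # ['text']: one string column, one row per line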
| 48
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase):
"""simple docstring"""
_A : List[Any] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_A : List[str] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : str = AudioClassificationPipeline(model=lowercase__ , feature_extractor=lowercase__ )
# test with a raw waveform
snake_case_ : Union[str, Any] = np.zeros((3_40_00,) )
snake_case_ : List[Any] = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : int = examples
snake_case_ : Dict = audio_classifier(lowercase__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
lowercase__ , [
{"""score""": ANY(lowercase__ ), """label""": ANY(lowercase__ )},
{"""score""": ANY(lowercase__ ), """label""": ANY(lowercase__ )},
] , )
snake_case_ : List[str] = audio_classifier(lowercase__ , top_k=1 )
self.assertEqual(
lowercase__ , [
{"""score""": ANY(lowercase__ ), """label""": ANY(lowercase__ )},
] , )
self.run_torchaudio(lowercase__ )
@require_torchaudio
def __UpperCamelCase (self , lowercase__ ):
import datasets
# test with a local file
snake_case_ : Optional[int] = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
snake_case_ : Union[str, Any] = dataset[0]["""audio"""]["""array"""]
snake_case_ : Union[str, Any] = audio_classifier(lowercase__ )
self.assertEqual(
lowercase__ , [
{"""score""": ANY(lowercase__ ), """label""": ANY(lowercase__ )},
{"""score""": ANY(lowercase__ ), """label""": ANY(lowercase__ )},
] , )
@require_torch
def __UpperCamelCase (self ):
snake_case_ : List[str] = """anton-l/wav2vec2-random-tiny-classifier"""
snake_case_ : List[str] = pipeline("""audio-classification""" , model=lowercase__ )
snake_case_ : Dict = np.ones((80_00,) )
snake_case_ : Tuple = audio_classifier(lowercase__ , top_k=4 )
snake_case_ : Dict = [
{"""score""": 0.0842, """label""": """no"""},
{"""score""": 0.0838, """label""": """up"""},
{"""score""": 0.0837, """label""": """go"""},
{"""score""": 0.0834, """label""": """right"""},
]
snake_case_ : str = [
{"""score""": 0.0845, """label""": """stop"""},
{"""score""": 0.0844, """label""": """on"""},
{"""score""": 0.0841, """label""": """right"""},
{"""score""": 0.0834, """label""": """left"""},
]
self.assertIn(nested_simplify(lowercase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
snake_case_ : Union[str, Any] = {"""array""": np.ones((80_00,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
snake_case_ : Tuple = audio_classifier(lowercase__ , top_k=4 )
self.assertIn(nested_simplify(lowercase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __UpperCamelCase (self ):
import datasets
snake_case_ : Union[str, Any] = """superb/wav2vec2-base-superb-ks"""
snake_case_ : List[Any] = pipeline("""audio-classification""" , model=lowercase__ )
snake_case_ : Optional[int] = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
snake_case_ : str = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
snake_case_ : Dict = audio_classifier(lowercase__ , top_k=4 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=3 ) , [
{"""score""": 0.981, """label""": """go"""},
{"""score""": 0.007, """label""": """up"""},
{"""score""": 0.006, """label""": """_unknown_"""},
{"""score""": 0.001, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __UpperCamelCase (self ):
pass
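# The public entry point the tests above exercise; the checkpoint name is copied from
# the test and the waveform is a dummy signal:
import numpy as np
from transformers import pipeline

classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
preds = classifier(np.ones((8000,), dtype=np.float32), top_k=2)
print(preds)  # e.g. [{'score': ..., 'label': ...}, {'score': ..., 'label': ...}]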
| 704
|
"""simple docstring"""
from copy import deepcopy
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ = None , lowercase__ = None ):
if arr is None and size is not None:
snake_case_ : str = size
snake_case_ : Optional[Any] = [0] * size
elif arr is not None:
self.init(lowercase__ )
else:
raise ValueError("""Either arr or size must be specified""" )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[Any] = len(lowercase__ )
snake_case_ : int = deepcopy(lowercase__ )
for i in range(1 , self.size ):
snake_case_ : Optional[Any] = self.next_(lowercase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCamelCase (self ):
snake_case_ : Dict = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case_ : Optional[int] = self.next_(lowercase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index + (index & (-index))
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index - (index & (-index))
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case_ : Tuple = self.next_(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
self.add(lowercase__ , value - self.get(lowercase__ ) )
def __UpperCamelCase (self , lowercase__ ):
if right == 0:
return 0
snake_case_ : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case_ : Optional[int] = self.prev(lowercase__ )
return result
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
return self.prefix(lowercase__ ) - self.prefix(lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return self.query(lowercase__ , index + 1 )
def __UpperCamelCase (self , lowercase__ ):
value -= self.tree[0]
if value < 0:
return -1
snake_case_ : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
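# A hedged usage sketch: `FenwickTree` here stands for the renamed class above (a Fenwick
# / binary indexed tree); prefix(i) sums the first i elements of the backing array.
f = FenwickTree(arr=[1, 2, 3, 4, 5])
assert f.prefix(3) == 1 + 2 + 3    # sum of arr[0:3]
assert f.query(1, 4) == 2 + 3 + 4  # sum of arr[1:4]
f.add(0, 10)                       # point update: arr[0] += 10
assert f.get(0) == 11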
| 48
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConditionalDetrFeatureExtractor''']
a_ = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 705
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ):
"""simple docstring"""
snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
for i in range(1 , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Tuple = collection[i]
snake_case_ : Tuple = 0
snake_case_ : str = i - 1
while low <= high:
snake_case_ : Optional[int] = (low + high) // 2
if val < collection[mid]:
snake_case_ : List[str] = mid - 1
else:
snake_case_ : str = mid + 1
for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ):
snake_case_ : List[str] = collection[j - 1]
snake_case_ : Any = val
return collection
if __name__ == "__main__":
a_ = input('''Enter numbers separated by a comma:\n''').strip()
a_ = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
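# A runnable, de-obfuscated version of the binary insertion sort above: binary-search the
# insertion point, then shift the gap open.
def binary_insertion_sort(collection: list) -> list:
    for i in range(1, len(collection)):
        val = collection[i]
        low, high = 0, i - 1
        while low <= high:                 # find the leftmost index where val fits
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):        # shift elements one slot to the right
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection

assert binary_insertion_sort([5, 2, 4, 1]) == [1, 2, 4, 5]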
| 48
| 0
|
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__=99 , lowercase__=13 , lowercase__=16 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=2 , lowercase__=32 , lowercase__=4 , lowercase__=4 , lowercase__=30 , lowercase__=0 , lowercase__=1 , lowercase__=2 , lowercase__=None , ):
snake_case_ : Tuple = parent
snake_case_ : Tuple = batch_size
snake_case_ : List[str] = decoder_seq_length
# For common tests
snake_case_ : List[str] = self.decoder_seq_length
snake_case_ : List[Any] = is_training
snake_case_ : List[Any] = use_attention_mask
snake_case_ : Dict = use_labels
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : List[str] = d_model
snake_case_ : str = d_model
snake_case_ : List[str] = decoder_layers
snake_case_ : Any = decoder_layers
snake_case_ : int = decoder_ffn_dim
snake_case_ : Tuple = decoder_attention_heads
snake_case_ : Union[str, Any] = decoder_attention_heads
snake_case_ : Optional[int] = eos_token_id
snake_case_ : Dict = bos_token_id
snake_case_ : int = pad_token_id
snake_case_ : List[Any] = decoder_start_token_id
snake_case_ : Union[str, Any] = use_cache
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = None
snake_case_ : Tuple = decoder_seq_length
snake_case_ : int = 2
snake_case_ : Optional[Any] = 1
def __UpperCamelCase (self ):
snake_case_ : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case_ : Dict = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case_ : Tuple = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case_ : int = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
snake_case_ : Optional[int] = True
snake_case_ : Optional[int] = TrOCRDecoder(config=lowercase__ ).to(lowercase__ ).eval()
snake_case_ : List[str] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case_ : Tuple = model(lowercase__ , use_cache=lowercase__ )
snake_case_ : Optional[Any] = model(lowercase__ )
snake_case_ : Optional[Any] = model(lowercase__ , use_cache=lowercase__ )
self.parent.assertTrue(len(lowercase__ ) == len(lowercase__ ) )
self.parent.assertTrue(len(lowercase__ ) == len(lowercase__ ) + 1 )
snake_case_ : List[str] = outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
snake_case_ : int = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
snake_case_ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : Optional[Any] = model(lowercase__ )["""last_hidden_state"""]
snake_case_ : List[Any] = model(lowercase__ , past_key_values=lowercase__ )["""last_hidden_state"""]
# select random slice
snake_case_ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : List[str] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case_ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowercase__ , lowercase__ , atol=1e-3 )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = self.prepare_config_and_inputs()
snake_case_ : List[str] = config_and_inputs
snake_case_ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Optional[int] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
_A : Any = (TrOCRForCausalLM,) if is_torch_available() else ()
_A : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
_A : Optional[int] = True
_A : int = False
def __UpperCamelCase (self ):
snake_case_ : Dict = TrOCRStandaloneDecoderModelTester(self , is_training=lowercase__ )
snake_case_ : Tuple = ConfigTester(self , config_class=lowercase__ )
def __UpperCamelCase (self ):
pass
def __UpperCamelCase (self ):
pass
def __UpperCamelCase (self ):
pass
def __UpperCamelCase (self ):
self.config_tester.run_common_tests()
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowercase__ )
def __UpperCamelCase (self ):
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def __UpperCamelCase (self ):
pass
| 706
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Union[str, Any] = ["""image_processor""", """tokenizer"""]
_A : str = """ChineseCLIPImageProcessor"""
_A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ):
snake_case_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase__ , )
snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" )
snake_case_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase__ , lowercase__ )
snake_case_ : Union[str, Any] = self.image_processor
def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if images is not None:
snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ )
if text is not None and images is not None:
snake_case_ : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def __UpperCamelCase (self , *lowercase__ , **lowercase__ ):
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.tokenizer.model_input_names
snake_case_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCamelCase (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , )
return self.image_processor_class
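# A hedged usage sketch, assuming the class above is transformers' ChineseCLIPProcessor;
# the checkpoint is the public OFA-Sys one and the image URL is a COCO sample:
import requests
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["一只猫"], images=image, return_tensors="pt", padding=True)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']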
| 48
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
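# A runnable version with the two parameters kept distinct (the renaming above collapsed
# them): two integers have opposite signs exactly when their XOR is negative, because the
# sign bit of num_a ^ num_b is set only when the sign bits differ.
def different_signs(num_a: int, num_b: int) -> bool:
    return num_a ^ num_b < 0

assert different_signs(1, -1) is True
assert different_signs(1, 1) is False
assert different_signs(-1, -1) is False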
| 707
|
"""simple docstring"""
import argparse
import copy
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : List[Any] = {}
with open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case_ : int = []
_list.append([line.split()[1], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case_ : str = []
_list.append([line.split()[0], line.split()[2]] )
snake_case_ : Optional[Any] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ ) as f:
snake_case_ : Optional[Any] = f.read(1 )
snake_case_ : Union[str, Any] = start_node
snake_case_ : Dict = []
snake_case_ : Union[str, Any] = start_node
snake_case_ : Tuple = 0
while visiting not in first_solution:
snake_case_ : int = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution:
snake_case_ : Union[str, Any] = k[1]
snake_case_ : Any = k[0]
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = best_node
first_solution.append(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case_ : int = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = []
for n in solution[1:-1]:
snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ )
for kn in solution[1:-1]:
snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ )
if n == kn:
continue
snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
snake_case_ : int = kn
snake_case_ : Dict = n
snake_case_ : Optional[int] = 0
for k in _tmp[:-1]:
snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case_ : Dict = distance + int(i[1] )
_tmp.append(SCREAMING_SNAKE_CASE__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case_ : Optional[Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
snake_case_ : Dict = 1
snake_case_ : List[Any] = first_solution
snake_case_ : List[Any] = []
snake_case_ : Optional[Any] = distance_of_first_solution
snake_case_ : Dict = solution
while count <= iters:
snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = 0
snake_case_ : List[Any] = neighborhood[index_of_best_solution]
snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1
snake_case_ : List[str] = False
while not found:
snake_case_ : Tuple = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
if best_solution[i] != solution[i]:
snake_case_ : Optional[Any] = best_solution[i]
snake_case_ : int = solution[i]
break
snake_case_ : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case_ : Tuple = True
snake_case_ : Dict = best_solution[:-1]
snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case_ : Tuple = cost
snake_case_ : Union[str, Any] = solution
else:
snake_case_ : str = index_of_best_solution + 1
snake_case_ : Tuple = neighborhood[index_of_best_solution]
if len(SCREAMING_SNAKE_CASE__ ) >= size:
tabu_list.pop(0 )
snake_case_ : List[str] = count + 1
return best_solution_ever, best_cost
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
"""simple docstring"""
snake_case_ : Tuple = generate_neighbours(args.File )
snake_case_ , snake_case_ : Optional[Any] = generate_first_solution(
args.File , SCREAMING_SNAKE_CASE__ )
snake_case_ , snake_case_ : Dict = tabu_search(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , )
print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
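# The input format, inferred from generate_neighbours above: each line holds two node
# labels and the distance between them, and the first character of the file doubles as
# the start node. A hypothetical file and invocation:
#
#   a b 20
#   a c 18
#   b c 10
#
#   python tabu_search.py -f graph.txt -i 100 -s 5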
| 48
| 0
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a_ = ['''gpt2''']
a_ = '''gpt2'''
if is_tf_available():
class __lowercase ( tf.Module):
"""simple docstring"""
def __init__(self , lowercase__ ):
super().__init__()
snake_case_ : Any = tokenizer
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
snake_case_ : Union[str, Any] = TFGPTaLMHeadModel.from_config(lowercase__ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : str = self.tokenizer(lowercase__ )
snake_case_ : str = tokenized["""input_ids"""].to_tensor()
snake_case_ : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
snake_case_ : str = self.model(input_ids=lowercase__ , attention_mask=lowercase__ )["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : List[Any] = [GPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
snake_case_ : Optional[Any] = [TFGPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
snake_case_ : Any = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
snake_case_ : Union[str, Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def __UpperCamelCase (self ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
snake_case_ : List[str] = tokenizer([test_inputs] , return_tensors="""tf""" )
snake_case_ : Tuple = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
snake_case_ : Dict = python_outputs[key].numpy()
snake_case_ : Dict = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowercase__ , tf.intaa ) == tf_outputs_values ) )
@slow
def __UpperCamelCase (self ):
for tf_tokenizer in self.tf_tokenizers:
snake_case_ : int = tf.function(lowercase__ )
for test_inputs in self.test_sentences:
snake_case_ : int = tf.constant(lowercase__ )
snake_case_ : str = compiled_tokenizer(lowercase__ )
snake_case_ : Dict = tf_tokenizer(lowercase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def __UpperCamelCase (self ):
for tf_tokenizer in self.tf_tokenizers:
snake_case_ : Optional[int] = ModelToSave(tokenizer=lowercase__ )
snake_case_ : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case_ : Tuple = model.serving(lowercase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
snake_case_ : int = Path(lowercase__ ) / """saved.model"""
tf.saved_model.save(lowercase__ , lowercase__ , signatures={"""serving_default""": model.serving} )
snake_case_ : Tuple = tf.saved_model.load(lowercase__ )
snake_case_ : List[Any] = loaded_model.signatures["""serving_default"""](lowercase__ )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def __UpperCamelCase (self ):
for tf_tokenizer in self.tf_tokenizers:
snake_case_ : List[str] = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case_ : List[Any] = tf_tokenizer(lowercase__ ) # Build model with some sample inputs
snake_case_ : Any = tf_tokenizer.get_config()
snake_case_ : List[Any] = TFGPTaTokenizer.from_config(lowercase__ )
snake_case_ : List[str] = model_from_config(lowercase__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def __UpperCamelCase (self ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
snake_case_ : Tuple = 12_31_23
for max_length in [3, 5, 10_24]:
snake_case_ : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
snake_case_ : List[Any] = tf_tokenizer(lowercase__ , max_length=lowercase__ )
snake_case_ : Union[str, Any] = out["""input_ids"""].numpy().shape[1]
assert out_length == max_length
| 708
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
        index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """rag"""
_A : Optional[Any] = True
def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
snake_case_ : List[str] = kwargs.pop("""generator""" )
snake_case_ : List[str] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : int = reduce_loss
snake_case_ : Optional[int] = label_smoothing
snake_case_ : Dict = exclude_bos_score
snake_case_ : Union[str, Any] = do_marginalize
snake_case_ : Union[str, Any] = title_sep
snake_case_ : int = doc_sep
snake_case_ : int = n_docs
snake_case_ : List[str] = max_combined_length
snake_case_ : Tuple = dataset
snake_case_ : int = dataset_split
snake_case_ : str = index_name
snake_case_ : List[str] = retrieval_vector_size
snake_case_ : Dict = retrieval_batch_size
snake_case_ : str = passages_path
snake_case_ : Union[str, Any] = index_path
snake_case_ : Tuple = use_dummy_dataset
snake_case_ : Dict = output_retrieved
snake_case_ : str = do_deduplication
snake_case_ : Any = use_cache
if self.forced_eos_token_id is None:
snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )
@classmethod
def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.question_encoder.to_dict()
snake_case_ : Dict = self.generator.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
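# A hedged usage sketch, assuming the class above is transformers' RagConfig; the
# classmethod at the end is the documented way to combine two sub-configs:
from transformers import AutoConfig, RagConfig

question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = AutoConfig.from_pretrained("facebook/bart-large")
config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator)
print(config.n_docs, config.index_name)  # 5 compressed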
| 48
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_A : ClassVar[Features] = Features({"""question""": Value("""string"""), """context""": Value("""string""")})
_A : ClassVar[Features] = Features(
{
"""answers""": Sequence(
{
"""text""": Value("""string"""),
"""answer_start""": Value("""int32"""),
})
})
_A : str = "question"
_A : str = "context"
_A : str = "answers"
@property
def __UpperCamelCase (self ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 709
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**lowercase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase__ , lowercase__ ):
snake_case_ : Tuple = backbone_config.get("""model_type""" )
snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(lowercase__ )
snake_case_ : List[Any] = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = initializer_range
snake_case_ : str = pool_scales
snake_case_ : Dict = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : List[str] = auxiliary_in_channels
snake_case_ : Optional[Any] = auxiliary_channels
snake_case_ : Any = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : List[str] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.backbone_config.to_dict()
snake_case_ : Any = self.__class__.model_type
return output
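# A hedged usage sketch, assuming the class above is transformers' UperNetConfig; it swaps
# the default ResNet backbone for ConvNeXt (any backbone config in CONFIG_MAPPING works):
from transformers import ConvNextConfig, UperNetConfig, UperNetForSemanticSegmentation

backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone_config, num_labels=150)
model = UperNetForSemanticSegmentation(config)  # randomly initialized segmentation head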
| 48
| 0
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="resnet50" , lowercase__=3 , lowercase__=32 , lowercase__=3 , lowercase__=True , lowercase__=True , ):
snake_case_ : Union[str, Any] = parent
snake_case_ : Optional[Any] = out_indices if out_indices is not None else [4]
snake_case_ : int = stage_names
snake_case_ : str = out_features
snake_case_ : Optional[Any] = backbone
snake_case_ : Tuple = batch_size
snake_case_ : Union[str, Any] = image_size
snake_case_ : List[str] = num_channels
snake_case_ : str = use_pretrained_backbone
snake_case_ : List[str] = is_training
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : List[str] = self.get_config()
return config, pixel_values
def __UpperCamelCase (self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : List[str] = TimmBackbone(config=lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
snake_case_ : str = model(lowercase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case_ : str = config_and_inputs
snake_case_ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
_A : Union[str, Any] = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
_A : Optional[Any] = False
_A : Dict = False
_A : List[Any] = False
_A : int = False
def __UpperCamelCase (self ):
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def __UpperCamelCase (self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase (self ):
        timm_checkpoint = """resnet18"""
        transformers_checkpoint = """microsoft/resnet-18"""
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def __UpperCamelCase (self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase (self ):
pass
def __UpperCamelCase (self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def __UpperCamelCase (self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def __UpperCamelCase (self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
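

if __name__ == "__main__":
    # Hedged usage sketch (not part of the test suite): load a timm backbone
    # through AutoBackbone, as the checkpoint-equivalence test above does.
    # Assumes the `timm` package is installed and the "resnet18" weights can
    # be downloaded from the Hub.
    import torch
    from transformers import AutoBackbone

    backbone = AutoBackbone.from_pretrained("resnet18" , use_timm_backbone=True , out_indices=(1, 2, 3) )
    features = backbone(torch.randn(1 , 3 , 224 , 224 ) ).feature_maps
    print([f.shape for f in features] )  # one feature map per requested stage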
| 710
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , lowercase__=-1 ):
# in NER datasets, the last column is usually reserved for NER label
snake_case_ : Union[str, Any] = label_idx
    def __UpperCamelCase (self , data_dir , mode ):
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'{mode}.txt' )
        guid_index = 1
        examples = []
        with open(file_path , encoding="""utf-8""" ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(""" """ )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("""O""" )
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
return examples
    def __UpperCamelCase (self , writer , test_input_reader , preds_list ):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
                writer.write(output_line )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
    def __UpperCamelCase (self , path ):
        if path:
            with open(path , """r""" ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
    def __UpperCamelCase (self , path ):
        if path:
            with open(path , """r""" ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
    def __UpperCamelCase (self , data_dir , mode ):
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'{mode}.txt' )
        guid_index = 1
        examples = []
        with open(file_path , encoding="""utf-8""" ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["""form"""] )
                    labels.append(token["""upos"""] )
                assert len(words ) == len(labels )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
return examples
    def __UpperCamelCase (self , writer , test_input_reader , preds_list ):
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = """"""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(out )
example_id += 1
    def __UpperCamelCase (self , path ):
        if path:
            with open(path , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
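

if __name__ == "__main__":
    # Standalone sketch of the CoNLL-style format the readers above consume:
    # one "token label" pair per line, blank lines separating sentences.
    # The sample data here is hypothetical.
    sample = "EU B-ORG\nrejects O\nGerman B-MISC\n\nPeter B-PER\n"
    words, labels = [], []
    for line in sample.splitlines():
        if not line:
            continue
        token, label = line.split(" " )
        words.append(token )
        labels.append(label )
    print(words , labels )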
| 48
| 0
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    """simple docstring"""
    with open(emoji_file , """r""" , encoding="""utf-8""" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , """r""" , encoding="""utf-8""" ) as f:
        token = f.readlines()
    token = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[""",""".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = VOCAB_FILES_NAMES
_A : int = PRETRAINED_VOCAB_FILES_MAP
_A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : List[Any] = ["""input_ids""", """attention_mask"""]
    def __init__(self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                """ model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                """ pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __UpperCamelCase (self ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def __UpperCamelCase (self ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
    def __UpperCamelCase (self , text ):
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def __UpperCamelCase (self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def __UpperCamelCase (self , index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def __UpperCamelCase (self , tokens ):
        out_string = """""".join(tokens ).strip()
        return out_string
    def __UpperCamelCase (self , conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def __UpperCamelCase (self , save_directory , filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
        else:
            vocab_file = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
            )
            emoji_file = (
                (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
            )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(""",""".join(token ) + """\n""" )
                index += 1
        with open(emoji_file , """w""" , encoding="""utf-8""" ) as writer:
            json.dump(self.emoji , writer )
return vocab_file, emoji_file
class SubWordJapaneseTokenizer ( _UpperCAmelCase):
"""simple docstring"""
    def __init__(self , vocab , ids_to_tokens , emoji ):
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
        self.content_repatter2 = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
        self.content_repatter3 = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
        self.content_repatter4 = re.compile(
            R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        self.content_repatter5 = re.compile(
            R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        self.content_repatter6 = re.compile(
            R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
        keisen = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
        blocks = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
        self.content_trans1 = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__(self ):
return len(self.ids_to_tokens )
    def clean_text(self , content ):
        content = self.content_repatter1.sub("""<URL>""" , content )
        content = self.content_repatter2.sub("""<EMAIL>""" , content )
        content = self.content_repatter3.sub("""<TEL>""" , content )
        content = self.content_repatter4.sub("""<DATE>""" , content )
        content = self.content_repatter5.sub("""<DATE>""" , content )
        content = self.content_repatter6.sub("""<PRICE>""" , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
        return content
    def tokenize(self , text , clean=False ):
        text = text.replace("""　""" , """<SP>""" )
        text = text.replace(""" """ , """<SP>""" )
        text = text.replace("""\r\n""" , """<BR>""" )
        text = text.replace("""\n""" , """<BR>""" )
        text = text.replace("""\r""" , """<BR>""" )
        text = text.replace("""\t""" , """<TAB>""" )
        text = text.replace("""—""" , """ー""" )
        text = text.replace("""−""" , """ー""" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
):
return True
return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
return True
return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
            candidates = [] # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append("""<KIGOU>""" )
                elif checkuae(wd ):
                    result.append("""<U2000U2BFF>""" )
                else:
                    for i in wd.encode("""utf-8""" ):
                        result.append("""<|byte%d|>""" % i )
                pos = end
return result
    def convert_id_to_token(self , index , breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["""emoji_inv"""][word] )
            elif word == "<SP>":
                words.append(""" """ )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append("""\t""" )
            elif word == "<BLOCK>":
                words.append("""▀""" )
            elif word == "<KIGOU>":
                words.append("""ǀ""" )
            elif word == "<U2000U2BFF>":
                words.append("""‖""" )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
        text = """""".join(words )
return text
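

if __name__ == "__main__":
    # Hedged usage sketch: this file mirrors GPTNeoXJapaneseTokenizer; loading
    # the published checkpoint requires network access to download the vocab
    # and emoji files.
    from transformers import GPTNeoXJapaneseTokenizer

    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b" )
    ids = tokenizer("こんにちは、世界。" ).input_ids
    print(ids , tokenizer.decode(ids ) )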
| 711
|
"""simple docstring"""
import random
def rabin_miller( num : int ):
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
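

# Quick sanity checks (five random bases, so a composite is rejected with high
# probability rather than certainty):
#   rabin_miller(97)  -> True  (97 is prime)
#   rabin_miller(561) -> False with overwhelming probability (561 = 3*11*17 is
#   a Carmichael number, which fools the simpler Fermat test)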
def is_prime_low_num( num : int ):
"""simple docstring"""
if num < 2:
return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime( keysize : int = 1024 ):
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 48
| 0
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : List[Any] = """mask2former"""
_A : List[Any] = ["""swin"""]
_A : Any = {"""hidden_size""": """hidden_dim"""}
    def __init__(self , backbone_config = None , feature_size = 2_56 , mask_feature_size = 2_56 , hidden_dim = 2_56 , encoder_feedforward_dim = 10_24 , activation_function = "relu" , encoder_layers = 6 , decoder_layers = 10 , num_attention_heads = 8 , dropout = 0.0 , dim_feedforward = 20_48 , pre_norm = False , enforce_input_projection = False , common_stride = 4 , ignore_value = 2_55 , num_queries = 1_00 , no_object_weight = 0.1 , class_weight = 2.0 , mask_weight = 5.0 , dice_weight = 5.0 , train_num_points = 1_25_44 , oversample_ratio = 3.0 , importance_sample_ratio = 0.75 , init_std = 0.02 , init_xavier_std = 1.0 , use_auxiliary_loss = True , feature_strides = [4, 8, 16, 32] , output_auxiliary_logits = None , **kwargs , ):
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
snake_case_ : Dict = CONFIG_MAPPING["""swin"""](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowercase__ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
super().__init__(**lowercase__ )
@classmethod
def __UpperCamelCase (cls , lowercase__ , **lowercase__ ):
return cls(
backbone_config=lowercase__ , **lowercase__ , )
def __UpperCamelCase (self ):
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
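

if __name__ == "__main__":
    # Hedged sketch using the upstream classes this file mirrors; building a
    # config object is offline, but `transformers` must be installed.
    from transformers import Mask2FormerConfig, SwinConfig

    backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = Mask2FormerConfig.from_backbone_config(backbone )
    print(config.backbone_config.model_type , config.num_queries )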
| 712
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = """deberta-v2"""
    def __init__(self , vocab_size=12_81_00 , hidden_size=15_36 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=61_44 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
super().__init__(**lowercase__ )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("""|""" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("""pooler_hidden_size""" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@property
def __UpperCamelCase (self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def __UpperCamelCase (self ):
return 12
    def __UpperCamelCase (self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , tokenizer = None , ):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
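

if __name__ == "__main__":
    # Hedged sketch: instantiate the upstream config this file mirrors and
    # check the flags the __init__ above wires up (offline, no weights).
    from transformers import DebertaV2Config

    cfg = DebertaV2Config(hidden_size=128 , num_hidden_layers=2 , num_attention_heads=2 , pos_att_type="p2c|c2p" )
    print(cfg.pos_att_type , cfg.relative_attention )  # ['p2c', 'c2p'] False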
| 48
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __lowercase ( unittest.TestCase):
"""simple docstring"""
_A : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_A : Tuple = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_A : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_A : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __UpperCamelCase (self ):
        text_classifier = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
        outputs = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        outputs = text_classifier("""This is great !""" , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
        outputs = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        outputs = text_classifier("""This is great !""" , top_k=1 )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        # Legacy behavior
        outputs = text_classifier("""This is great !""" , return_all_scores=False )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        outputs = text_classifier("""This is great !""" , return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
        outputs = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=True )
        self.assertEqual(
            nested_simplify(outputs ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        outputs = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=False )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {"""label""": """LABEL_0""", """score""": 0.504},
                {"""label""": """LABEL_0""", """score""": 0.504},
            ] , )
@require_torch
def __UpperCamelCase (self ):
import torch
        text_classifier = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
        outputs = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def __UpperCamelCase (self ):
        text_classifier = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
        outputs = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def __UpperCamelCase (self ):
        text_classifier = pipeline("""text-classification""" )
        outputs = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        outputs = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        outputs = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def __UpperCamelCase (self ):
        text_classifier = pipeline("""text-classification""" , framework="""tf""" )
        outputs = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        outputs = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        outputs = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
    def __UpperCamelCase (self , model , tokenizer , processor ):
        text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def __UpperCamelCase (self , text_classifier , _ ):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = """HuggingFace is in"""
        outputs = text_classifier(valid_inputs )
        self.assertEqual(nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
        valid_inputs = ["""HuggingFace is in """, """Paris is in France"""]
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}, {"""label""": ANY(str ), """score""": ANY(float )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
        self.assertTrue(outputs[1]["""label"""] in model.config.id2label.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs , top_k=None )
        N = len(model.config.id2label.values() )
        self.assertEqual(
            nested_simplify(outputs ) , [[{"""label""": ANY(str ), """score""": ANY(float )}] * N, [{"""label""": ANY(str ), """score""": ANY(float )}] * N] , )
        valid_inputs = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        outputs = text_classifier(valid_inputs )
        self.assertEqual(
            nested_simplify(outputs ) , {"""label""": ANY(str ), """score""": ANY(float )} , )
        self.assertTrue(outputs["""label"""] in model.config.id2label.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
        invalid_args = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(ValueError ):
            text_classifier(invalid_args )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
        self.assertEqual(
            nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
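

if __name__ == "__main__":
    # Hedged sketch of the API under test; the tiny checkpoint is fetched from
    # the Hub on first use.
    from transformers import pipeline

    classifier = pipeline("text-classification" , model="hf-internal-testing/tiny-random-distilbert" )
    print(classifier("This is great !" , top_k=2 ) )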
| 713
|
"""simple docstring"""
def different_signs( numa : int , numb : int ):
    """simple docstring"""
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
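    # Worked examples of the sign-bit trick: in two's complement, XOR sets the
    # sign bit exactly when the operands' sign bits differ, so the result is
    # negative iff the signs differ.
    print(different_signs(3 , -7 ))   # True:  3 ^ -7 == -6 < 0
    print(different_signs(-4 , -4 ))  # False: -4 ^ -4 == 0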
| 48
| 0
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Dict = VQModel
_A : Union[str, Any] = """sample"""
@property
    def __UpperCamelCase (self , sizes=(32, 32) ):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
@property
def __UpperCamelCase (self ):
return (3, 32, 32)
@property
def __UpperCamelCase (self ):
return (3, 32, 32)
def __UpperCamelCase (self ):
        init_dict = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def __UpperCamelCase (self ):
pass
def __UpperCamelCase (self ):
pass
def __UpperCamelCase (self ):
        model , loading_info = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __UpperCamelCase (self ):
        model = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1e-3 ) )
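

if __name__ == "__main__":
    # Hedged sketch: a randomly initialized VQModel forward pass (no checkpoint
    # download; the config mirrors the dummy one used in the tests above).
    import torch
    from diffusers import VQModel

    model = VQModel(
        block_out_channels=[32, 64] ,
        in_channels=3 ,
        out_channels=3 ,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,
        latent_channels=3 , )
    sample = model(torch.randn(1 , 3 , 32 , 32 ) ).sample
    print(sample.shape )  # torch.Size([1, 3, 32, 32])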
| 714
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
zh_code = '''>>zh<<'''
ORG_NAME = '''Helsinki-NLP/'''
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
@require_sentencepiece
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : str = MarianTokenizer
_A : List[str] = False
_A : List[str] = True
def __UpperCamelCase (self ):
super().setUp()
        vocab = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(mock_tokenizer_config , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase (self , **lowercase__ ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase (self ):
        token = """</s>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __UpperCamelCase (self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 9 )
def __UpperCamelCase (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCamelCase (self ):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
        batch = en_de_tokenizer(["""I am a small frog"""] , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [38, 1_21, 14, 6_97, 3_88_48, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob("""*""" )]
        self.assertIn("""source.spm""" , contents )
        MarianTokenizer.from_pretrained(save_dir )
def __UpperCamelCase (self ):
        tok = self.get_tokenizer()
        batch = tok(
            ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def __UpperCamelCase (self ):
        tok = self.get_tokenizer()
        batch_smaller = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __UpperCamelCase (self ):
# fmt: off
snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def __UpperCamelCase (self ):
        tokenizer = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        source_text = """Tämä on testi"""
        target_text = """This is a test"""
        expected_src_ids = [76, 7, 20_47, 2]
        expected_target_ids = [69, 12, 11, 9_40, 2]
        src_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(src_ids , expected_src_ids )
        target_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(target_ids , expected_target_ids )
        decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
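

if __name__ == "__main__":
    # Hedged sketch of MarianTokenizer with a published checkpoint (requires
    # network access and the `sentencepiece` package).
    from transformers import MarianTokenizer

    tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de" )
    print(tok(["I am a small frog"] ).input_ids )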
| 48
| 0
|
"""simple docstring"""
def solution():
    """simple docstring"""
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
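

def solution_mod():
    # Hedged sketch (`solution_mod` is a hypothetical helper, not part of the
    # original file): only the last ten digits matter, so each term can be
    # reduced with three-argument pow (modular exponentiation) and the sum
    # taken modulo 10**10, avoiding the huge intermediate integers.
    modulus = 10**10
    return str(sum(pow(i , i , modulus ) for i in range(1 , 1001 ) ) % modulus ).zfill(10 )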
if __name__ == "__main__":
print(solution())
| 715
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_A : ClassVar[Features] = Features({"""audio""": Audio()})
_A : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_A : str = "audio"
_A : str = "transcription"
    def __UpperCamelCase (self , features ):
        if self.audio_column not in features:
            raise ValueError(f'Column {self.audio_column} is not present in features.' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["""audio"""] = features[self.audio_column]
        task_template.__dict__["""input_schema"""] = input_schema
return task_template
@property
def __UpperCamelCase (self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
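

if __name__ == "__main__":
    # Hedged sketch using the upstream template this file mirrors
    # (datasets.tasks.AutomaticSpeechRecognition, removed in recent `datasets`
    # releases); constructing the template itself is offline.
    from datasets.tasks import AutomaticSpeechRecognition

    template = AutomaticSpeechRecognition(audio_column="audio" , transcription_column="transcription" )
    print(template.column_mapping )  # {'audio': 'audio', 'transcription': 'transcription'}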
| 48
| 0
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : List[str] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
    def __UpperCamelCase (self , seed=0 ):
        generator = np.random.RandomState(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase (self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase (self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward
        output = pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("""prompt""" )]
        text_inputs = pipe.tokenizer(
            prompt , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors="""np""" , )
        text_inputs = text_inputs["""input_ids"""]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
        inputs["""prompt_embeds"""] = prompt_embeds
        # forward
        output = pipe(**inputs )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
def __UpperCamelCase (self ):
snake_case_ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Tuple = self.get_dummy_inputs()
snake_case_ : List[Any] = 3 * ["""this is a negative prompt"""]
snake_case_ : Any = negative_prompt
snake_case_ : Dict = 3 * [inputs["""prompt"""]]
# forward
snake_case_ : Optional[int] = pipe(**lowercase__ )
snake_case_ : List[Any] = output.images[0, -3:, -3:, -1]
snake_case_ : Optional[int] = self.get_dummy_inputs()
snake_case_ : List[Any] = 3 * [inputs.pop("""prompt""" )]
snake_case_ : List[Any] = []
for p in [prompt, negative_prompt]:
snake_case_ : str = pipe.tokenizer(
lowercase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowercase__ , return_tensors="""np""" , )
snake_case_ : Any = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
snake_case_ : str = embeds
# forward
snake_case_ : str = pipe(**lowercase__ )
snake_case_ : int = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowercase ( unittest.TestCase):
"""simple docstring"""
@property
def __UpperCamelCase (self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase (self ):
snake_case_ : str = ort.SessionOptions()
snake_case_ : Any = False
return options
def __UpperCamelCase (self ):
# using the PNDM scheduler by default
snake_case_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Any = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
snake_case_ : Dict = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
snake_case_ : List[Any] = output.images
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case_ : Dict = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase (self ):
snake_case_ : str = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
snake_case_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : List[Any] = """open neural network exchange"""
snake_case_ : str = np.random.RandomState(0 )
snake_case_ : Dict = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase__ , output_type="""np""" )
snake_case_ : int = output.images
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case_ : str = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
snake_case_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Dict = """open neural network exchange"""
snake_case_ : Any = np.random.RandomState(0 )
snake_case_ : str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase__ , output_type="""np""" )
snake_case_ : Union[str, Any] = output.images
snake_case_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case_ : Optional[int] = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase (self ):
snake_case_ : str = 0
def test_callback_fn(lowercase__ , lowercase__ , lowercase__ ) -> None:
snake_case_ : Optional[int] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
snake_case_ : str = latents[0, -3:, -3:, -1]
snake_case_ : str = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
snake_case_ : Union[str, Any] = latents[0, -3:, -3:, -1]
snake_case_ : Optional[Any] = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
snake_case_ : Union[str, Any] = False
snake_case_ : Any = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase__ )
snake_case_ : Optional[int] = """Andromeda galaxy in a bottle"""
snake_case_ : Union[str, Any] = np.random.RandomState(0 )
pipe(
prompt=lowercase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowercase__ , callback=lowercase__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(lowercase__ , lowercase__ )
assert pipe.safety_checker is None
snake_case_ : int = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase__ )
snake_case_ : int = OnnxStableDiffusionPipeline.from_pretrained(lowercase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
snake_case_ : Optional[Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
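# Hedged usage sketch (illustration only, not part of the test suite): the tests
# above verify that precomputed `prompt_embeds` reproduce the plain-text `prompt`
# path. A minimal end-to-end version, assuming a local ONNX Stable Diffusion
# checkpoint is available (the path below is a placeholder):
#
#     import numpy as np
#     from diffusers import OnnxStableDiffusionPipeline
#
#     pipe = OnnxStableDiffusionPipeline.from_pretrained(
#         "path/to/onnx-stable-diffusion", provider="CPUExecutionProvider"
#     )
#     text_inputs = pipe.tokenizer(
#         "a photo of an astronaut", padding="max_length",
#         max_length=pipe.tokenizer.model_max_length,
#         truncation=True, return_tensors="np",
#     )
#     prompt_embeds = pipe.text_encoder(input_ids=text_inputs.input_ids.astype(np.int32))[0]
#     image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=2, output_type="np").images[0]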
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = ["""pixel_values"""]
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : Union[str, Any] = do_resize
snake_case_ : List[str] = size
snake_case_ : str = crop_pct
snake_case_ : str = resample
snake_case_ : Optional[Any] = do_center_crop
snake_case_ : Dict = crop_size
snake_case_ : int = do_rescale
snake_case_ : Optional[int] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
snake_case_ : Dict = int(size["""height"""] / crop_pct )
else:
snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
else:
if "shortest_edge" in size:
snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
elif "height" in size and "width" in size:
snake_case_ : int = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
snake_case_ : int = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ : List[Any] = resample if resample is not None else self.resample
snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : int = image_std if image_std is not None else self.image_std
snake_case_ : List[Any] = size if size is not None else self.size
snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
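# Hedged usage sketch: applying the processor above to one PIL image with its
# defaults (resize via crop_pct, 224x224 center crop, ImageNet normalization).
# The class name `PoolFormerImageProcessor` is an assumption based on the
# crop_pct-style API; substitute the module's actual class name.
#
#     import numpy as np
#     from PIL import Image
#     from transformers import PoolFormerImageProcessor  # assumed name
#
#     processor = PoolFormerImageProcessor()
#     image = Image.fromarray((np.random.rand(300, 400, 3) * 255).astype(np.uint8))
#     batch = processor(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)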
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ = None ):
if components is None:
snake_case_ : Any = []
snake_case_ : Dict = list(lowercase__ )
def __len__(self ):
return len(self.__components )
def __str__(self ):
return "(" + ",".join(map(lowercase__ , self.__components ) ) + ")"
def __add__(self , lowercase__ ):
snake_case_ : Any = len(self )
if size == len(lowercase__ ):
snake_case_ : Union[str, Any] = [self.__components[i] + other.component(lowercase__ ) for i in range(lowercase__ )]
return Vector(lowercase__ )
else:
raise Exception("""must have the same size""" )
def __sub__(self , lowercase__ ):
snake_case_ : Optional[Any] = len(self )
if size == len(lowercase__ ):
snake_case_ : str = [self.__components[i] - other.component(lowercase__ ) for i in range(lowercase__ )]
return Vector(lowercase__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__(self , lowercase__ ):
...
@overload
def __mul__(self , lowercase__ ):
...
def __mul__(self , lowercase__ ):
if isinstance(lowercase__ , (float, int) ):
snake_case_ : str = [c * other for c in self.__components]
return Vector(lowercase__ )
elif isinstance(lowercase__ , lowercase__ ) and len(self ) == len(lowercase__ ):
snake_case_ : str = len(self )
snake_case_ : List[Any] = [self.__components[i] * other.component(lowercase__ ) for i in range(lowercase__ )]
return sum(lowercase__ )
else: # error case
raise Exception("""invalid operand!""" )
def __UpperCamelCase (self ):
return Vector(self.__components )
def __UpperCamelCase (self , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
assert -len(self.__components ) <= pos < len(self.__components )
snake_case_ : Any = value
def __UpperCamelCase (self ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
snake_case_ : int = [c**2 for c in self.__components]
return math.sqrt(sum(lowercase__ ) )
def __UpperCamelCase (self , lowercase__ , lowercase__ = False ):
snake_case_ : str = self * other
snake_case_ : Dict = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
snake_case_ : int = [0] * dimension
snake_case_ : Tuple = 1
return Vector(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
"""simple docstring"""
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
random.seed(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = [random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : str = matrix
snake_case_ : List[Any] = w
snake_case_ : List[Any] = h
def __str__(self ):
snake_case_ : Union[str, Any] = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__(self , lowercase__ ):
if self.__width == other.width() and self.__height == other.height():
snake_case_ : List[str] = []
for i in range(self.__height ):
snake_case_ : List[Any] = [
self.__matrix[i][j] + other.component(lowercase__ , lowercase__ )
for j in range(self.__width )
]
matrix.append(lowercase__ )
return Matrix(lowercase__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__(self , lowercase__ ):
if self.__width == other.width() and self.__height == other.height():
snake_case_ : List[str] = []
for i in range(self.__height ):
snake_case_ : Dict = [
self.__matrix[i][j] - other.component(lowercase__ , lowercase__ )
for j in range(self.__width )
]
matrix.append(lowercase__ )
return Matrix(lowercase__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__(self , lowercase__ ):
...
@overload
def __mul__(self , lowercase__ ):
...
def __mul__(self , lowercase__ ):
if isinstance(lowercase__ , lowercase__ ): # matrix-vector
if len(lowercase__ ) == self.__width:
snake_case_ : Optional[int] = zero_vector(self.__height )
for i in range(self.__height ):
snake_case_ : List[str] = [
self.__matrix[i][j] * other.component(lowercase__ )
for j in range(self.__width )
]
ans.change_component(lowercase__ , sum(lowercase__ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(lowercase__ , (int, float) ): # matrix-scalar
snake_case_ : Optional[int] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowercase__ , self.__width , self.__height )
return None
def __UpperCamelCase (self ):
return self.__height
def __UpperCamelCase (self ):
return self.__width
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
snake_case_ : List[Any] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
snake_case_ : int = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowercase__ ) ):
snake_case_ : Tuple = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowercase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowercase__ , lowercase__ )
else:
raise Exception("""Indices out of bounds""" )
def __UpperCamelCase (self ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
snake_case_ : str = [
self.__matrix[0][y] * self.cofactor(0 , lowercase__ ) for y in range(self.__width )
]
return sum(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : list[list[float]] = [[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
random.seed(SCREAMING_SNAKE_CASE__ )
snake_case_ : list[list[float]] = [
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
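# Hedged usage sketch with the original (de-obfuscated) names `Vector` and
# `Matrix`; the names and behavior are inferred from the methods above.
#
#     v = Vector([1.0, 2.0, 3.0])
#     w = Vector([3.0, 2.0, 1.0])
#     print(v + w)                   # (4.0,4.0,4.0)
#     print(v * w)                   # dot product -> 10.0
#     print(v.euclidean_length())    # sqrt(14) ~ 3.7417
#
#     m = Matrix([[1, 2], [3, 4]], 2, 2)
#     print(m.determinant())         # 1*4 - 2*3 = -2
#     print(m * Vector([1.0, 1.0]))  # matrix-vector product -> (3.0,7.0)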
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
a_ = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = VOCAB_FILES_NAMES
_A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_A : str = ["""input_ids""", """attention_mask"""]
_A : Tuple = MBartTokenizer
_A : List[int] = []
_A : List[int] = []
def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
super().__init__(
vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
snake_case_ : Dict = vocab_file
snake_case_ : Optional[int] = False if not self.vocab_file else True
snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case_ : Any = {
lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX"""
snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
snake_case_ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase (self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : List[Any] = [self.sep_token_id]
snake_case_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ : int = src_lang
snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Union[str, Any] = tgt_lang_id
return inputs
def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ):
snake_case_ : List[str] = src_lang
snake_case_ : int = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def __UpperCamelCase (self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase (self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Tuple = []
snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
snake_case_ : Optional[int] = []
snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
snake_case_ : List[str] = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
copyfile(self.vocab_file , lowercase__ )
return (out_vocab_file,)
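# Hedged usage sketch: preparing an en->ro batch with the fast MBart tokenizer
# above. The token layout follows set_src_lang_special_tokens /
# set_tgt_lang_special_tokens; `text_target` assumes a recent transformers API.
#
#     from transformers import MBartTokenizerFast
#
#     tok = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tok(
#         "UN Chief Says There Is No Military Solution in Syria",
#         text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
#         return_tensors="pt",
#     )
#     # input_ids end with [..., </s>, en_XX]; labels end with [..., </s>, ro_RO]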
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __init__(self , *lowercase__ , **lowercase__ ):
super().__init__(*lowercase__ , **lowercase__ )
snake_case_ : Union[str, Any] = {}
def __UpperCamelCase (self , lowercase__ , *lowercase__ , **lowercase__ ):
snake_case_ : Optional[int] = super().add_tokens(lowercase__ , *lowercase__ , **lowercase__ )
if num_added_tokens == 0:
raise ValueError(
f'The tokenizer already contains the token {placeholder_token}. Please pass a different'
""" `placeholder_token` that is not already in the tokenizer.""" )
def __UpperCamelCase (self , lowercase__ , *lowercase__ , lowercase__=1 , **lowercase__ ):
snake_case_ : Optional[int] = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowercase__ , *lowercase__ , **lowercase__ )
output.append(lowercase__ )
else:
snake_case_ : Dict = []
for i in range(lowercase__ ):
snake_case_ : Union[str, Any] = placeholder_token + f'_{i}'
self.try_adding_tokens(lowercase__ , *lowercase__ , **lowercase__ )
output.append(lowercase__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f'The tokenizer already has placeholder token {token} that can get confused with'
f' {placeholder_token}. Keep placeholder tokens independent.' )
snake_case_ : str = output
def __UpperCamelCase (self , lowercase__ , lowercase__=False , lowercase__=1.0 ):
if isinstance(lowercase__ , lowercase__ ):
snake_case_ : Dict = []
for i in range(len(lowercase__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
snake_case_ : str = self.token_map[placeholder_token]
snake_case_ : str = tokens[: 1 + int(len(lowercase__ ) * prop_tokens_to_load )]
if vector_shuffle:
snake_case_ : Optional[Any] = copy.copy(lowercase__ )
random.shuffle(lowercase__ )
snake_case_ : Optional[int] = text.replace(lowercase__ , """ """.join(lowercase__ ) )
return text
def __call__(self , lowercase__ , *lowercase__ , lowercase__=False , lowercase__=1.0 , **lowercase__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowercase__ , vector_shuffle=lowercase__ , prop_tokens_to_load=lowercase__ ) , *lowercase__ , **lowercase__ , )
def __UpperCamelCase (self , lowercase__ , *lowercase__ , lowercase__=False , lowercase__=1.0 , **lowercase__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
lowercase__ , vector_shuffle=lowercase__ , prop_tokens_to_load=lowercase__ ) , *lowercase__ , **lowercase__ , )
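# Hedged usage sketch: the class above (original name assumed to be
# `MultiTokenCLIPTokenizer`, from the multi-vector textual-inversion example)
# maps one placeholder token to several sub-tokens before encoding. Method
# names are assumptions recovered from the obfuscated definitions above.
#
#     tok = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#     tok.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#     # "<cat-toy>" expands to "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3"
#     ids = tok.encode("a photo of <cat-toy>")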
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ ):
snake_case_ : Union[str, Any] = data
snake_case_ : List[str] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0]
@staticmethod
def __UpperCamelCase (lowercase__ , lowercase__ ):
return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f
def __UpperCamelCase (self ):
snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def __UpperCamelCase (self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64
for i in range(16 , 80 ):
snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.padding()
snake_case_ : Any = self.split_blocks()
for block in self.blocks:
snake_case_ : Any = self.expand_block(lowercase__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
snake_case_ : Optional[Any] = (b & c) | ((~b) & d)
snake_case_ : List[str] = 0X5_a_8_2_7_9_9_9
elif 20 <= i < 40:
snake_case_ : Union[str, Any] = b ^ c ^ d
snake_case_ : Tuple = 0X6_e_d_9_e_b_a_1
elif 40 <= i < 60:
snake_case_ : str = (b & c) | (b & d) | (c & d)
snake_case_ : List[str] = 0X8_f_1_b_b_c_d_c
elif 60 <= i < 80:
snake_case_ : Tuple = b ^ c ^ d
snake_case_ : str = 0Xc_a_6_2_c_1_d_6
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = (
self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f,
a,
self.rotate(lowercase__ , 30 ),
c,
d,
)
snake_case_ : Any = (
self.h[0] + a & 0Xf_f_f_f_f_f_f_f,
self.h[1] + b & 0Xf_f_f_f_f_f_f_f,
self.h[2] + c & 0Xf_f_f_f_f_f_f_f,
self.h[3] + d & 0Xf_f_f_f_f_f_f_f,
self.h[4] + e & 0Xf_f_f_f_f_f_f_f,
)
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Union[str, Any] = b"""Test String"""
assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
snake_case_ : Optional[int] = parser.parse_args()
snake_case_ : Optional[int] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
snake_case_ : List[str] = f.read()
else:
snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" )
print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
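# Hedged usage sketch: hashing with the class above (original name assumed to
# be `SHA1Hash`) and cross-checking against hashlib's reference implementation.
#
#     import hashlib
#     msg = b"The quick brown fox jumps over the lazy dog"
#     assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()
#     # both yield "2fd4e1c67a2d28fced849ee1bb76e7391b93eb12"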
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Dict = CLIPTokenizer
_A : Any = CLIPTokenizerFast
_A : List[Any] = True
_A : Optional[Any] = {}
_A : int = False
def __UpperCamelCase (self ):
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case_ : List[Any] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : Optional[Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
snake_case_ : Optional[int] = {"""unk_token""": """<unk>"""}
snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase__ ) )
def __UpperCamelCase (self , **lowercase__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , **lowercase__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Tuple = """lower newer"""
snake_case_ : str = """lower newer"""
return input_text, output_text
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : Optional[Any] = """lower newer"""
snake_case_ : Union[str, Any] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
snake_case_ : Any = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : Any = tokens + [tokenizer.unk_token]
snake_case_ : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
@require_ftfy
def __UpperCamelCase (self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : List[str] = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
snake_case_ : List[Any] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
snake_case_ : List[Any] = tokenizer_s.tokenize(lowercase__ )
snake_case_ : Optional[Any] = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
snake_case_ : str = """xa\u0303y""" + """ """ + """x\xe3y"""
snake_case_ : Dict = tokenizer_s.tokenize(lowercase__ )
snake_case_ : List[str] = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on unicode of space type
snake_case_ : Optional[Any] = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
snake_case_ : Tuple = tokenizer_s.tokenize(lowercase__ )
snake_case_ : List[Any] = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on unicode of line break type
snake_case_ : Dict = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
snake_case_ : Any = tokenizer_s.tokenize(lowercase__ )
snake_case_ : Optional[int] = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ : Dict = f'{text_of_1_token} {text_of_1_token}'
snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , )
snake_case_ : Union[str, Any] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
snake_case_ : Optional[int] = f' {text}'
snake_case_ : int = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , )
snake_case_ : List[Any] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
def __UpperCamelCase (self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowercase__ ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def __UpperCamelCase (self ):
super().test_tokenization_python_rust_equals()
def __UpperCamelCase (self ):
# CLIP always lower cases letters
pass
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : str = [mem.copy() for i in range(6 )]
snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : Dict = Text("""Model""" , font_size=24 )
snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
snake_case_ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
snake_case_ : List[str] = [mem.copy() for i in range(6 )]
snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ : Union[str, Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
snake_case_ : List[Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ : List[Any] = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
for i, rect in enumerate(lowercase__ ):
snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
snake_case_ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
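# Hedged usage note: with Manim Community installed, a Scene subclass like the
# one above is rendered from the command line, e.g.
#     manim -pql <this_file>.py <SceneClassName>
# where -p previews the result and -ql selects low render quality.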
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a_ = True
except (ImportError, ModuleNotFoundError):
a_ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
re.sub("""<n>""" , """""" , SCREAMING_SNAKE_CASE__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE__ ) )
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
if start < end:
snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = a[end]
snake_case_ : Dict = a[pivot]
snake_case_ : Any = temp
snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ )
return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 100 # 1000 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
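# Hedged usage sketch with the original (de-obfuscated) entry point
# `_in_place_quick_sort(a, start, end)` defined above:
#
#     data = [3, 1, 4, 1, 5, 9, 2, 6]
#     comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#     print(data)         # [1, 1, 2, 3, 4, 5, 6, 9]
#     print(comparisons)  # varies run to run with the random pivots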
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
a_ = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
snake_case_ : Optional[Any] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'config.{attribute}' in modeling_source
or f'getattr(config, "{attribute}"' in modeling_source
or f'getattr(self.config, "{attribute}"' in modeling_source
):
snake_case_ : Dict = True
# Deal with multi-line cases
elif (
re.search(
Rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , SCREAMING_SNAKE_CASE__ , )
is not None
):
snake_case_ : Tuple = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
snake_case_ : Tuple = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
snake_case_ : int = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
snake_case_ : str = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
snake_case_ : str = True
if not attribute_used:
snake_case_ : Dict = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
snake_case_ : List[str] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
snake_case_ : Optional[Any] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
snake_case_ : Optional[int] = True
elif attribute.endswith("""_token_id""" ):
snake_case_ : Tuple = True
# configuration class specific cases
if not case_allowed:
snake_case_ : List[str] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
snake_case_ : Optional[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the list of `config_class` attributes that are never used in the modeling files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
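# Illustrative note on the `attribute_map` reversal above: a config may expose
# an alias for a parameter, e.g. (hypothetical values)
#
#     attribute_map = {"num_attention_heads": "encoder_attention_heads"}
#     reversed_attribute_map = {v: k for k, v in attribute_map.items()}
#     # -> {"encoder_attention_heads": "num_attention_heads"}
#
# so the check accepts either spelling in the modeling files.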
def check_config_attributes():
    """Check that the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ):
"""simple docstring"""
snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE__ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE__ )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(SCREAMING_SNAKE_CASE__ )
return graph
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return {
i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ )
}
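# Example usage (illustrative): in a complete graph every pair of distinct
# vertices is connected, so
#
#     >>> complete_graph(3)
#     {0: [1, 2], 1: [0, 2], 2: [0, 1]}
#
# while random_graph(3, 0.5) keeps each candidate edge with probability 0.5.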
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(decoded, expected)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
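# Minimal usage sketch (illustrative, relies only on the class defined above):
#
#     config = IBertConfig(quant_mode=True)
#     print(config.model_type)  # "ibert"
#     print(config.quant_mode)  # True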
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))
    def test_checkpointing_by_steps(self):
        testargs = f"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))
    def test_load_states_by_epoch(self):
        testargs = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}\n ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)
    def test_load_states_by_steps(self):
        testargs = f"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}\n ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)
    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)
    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))
    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , '''w''' ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-indexed line number of the base/exponent pair with the greatest value."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
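# Why comparing x * log10(a) works (illustrative note): log10 is monotonically
# increasing, so a**x > c**d exactly when x * log10(a) > d * log10(c), and the
# logarithms stay tiny even when the powers themselves have thousands of digits.
# For example, 2**10 = 1024 < 3**7 = 2187, and indeed
# 10 * log10(2) ≈ 3.010 < 7 * log10(3) ≈ 3.340.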
if __name__ == "__main__":
print(solution())
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
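#
# Illustrative sketch of that expansion, mirroring what main() below does with
# itertools.product:
#
#     import itertools
#     dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#     variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#     # ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#     #  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']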
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper class to tee print's output into a file (usage: sys.stdout = Tee(filename))."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped to `max_width` chars with shell continuations."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def _lowerCamelCase ( ):
lowerCamelCase :Dict = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''' , default=a_ , type=a_ , required=a_ , help='''Base cmd''' , )
parser.add_argument(
'''--variations''' , default=a_ , type=a_ , nargs='''+''' , required=a_ , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
parser.add_argument(
'''--base-variation''' , default=a_ , type=a_ , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
parser.add_argument(
'''--target-metric-key''' , default=a_ , type=a_ , required=a_ , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
parser.add_argument(
'''--report-metric-keys''' , default='''''' , type=a_ , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
parser.add_argument(
'''--repeat-times''' , default=1 , type=a_ , help='''How many times to re-run each variation - an average will be reported''' , )
parser.add_argument(
'''--output_dir''' , default='''output_benchmark''' , type=a_ , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
parser.add_argument(
'''--verbose''' , default=a_ , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
lowerCamelCase :Optional[Any] = parser.parse_args()
lowerCamelCase :List[Any] = args.output_dir
Path(a_).mkdir(exist_ok=a_)
lowerCamelCase :List[str] = get_base_command(a_ , a_)
# split each dimension into its --foo variations
    lowerCamelCase :int = [list(map(str.strip , re.split(R'''\|''' , x))) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
lowerCamelCase :Optional[int] = list(map(str.strip , map(''' '''.join , itertools.product(*a_))))
    lowerCamelCase :Tuple = max(len(x) for x in variations)
# split wanted keys
lowerCamelCase :Tuple = args.report_metric_keys.split()
# capture prints into a log file for convenience
lowerCamelCase :Dict = F"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
print(F"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
print(F"and this script's output is also piped into {report_fn}")
lowerCamelCase :Optional[Any] = Tee(a_)
print(F"\n*** Running {len(a_)} benchmarks:")
print(F"Base command: {' '.join(a_)}")
lowerCamelCase :Dict = '''variation'''
lowerCamelCase :Optional[Any] = []
for id, variation in enumerate(tqdm(a_ , desc='''Total completion: ''' , leave=a_)):
lowerCamelCase :int = base_cmd + variation.split()
results.append(
process_run(
id + 1 , a_ , a_ , a_ , a_ , args.target_metric_key , a_ , args.repeat_times , a_ , args.verbose , ))
process_results(a_ , args.target_metric_key , a_ , args.base_variation , a_)
if __name__ == "__main__":
main()
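# Example invocation of the benchmark script above (a sketch; the script name
# and example paths are illustrative assumptions, not taken from this file):
#
#   python trainer-benchmark.py \
#       --base-cmd "python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small" \
#       --variations '|--fp16|--bf16' '|--tf32' \
#       --target-metric-key train_samples_per_second \
#       --report-metric-keys train_loss \
#       --repeat-times 2
#
# Each '|'-separated dimension expands into its alternatives (the empty entry
# means "flag absent"), and the cartesian product across dimensions yields
# 3 x 2 = 6 benchmark runs here, each repeated twice and averaged.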
def min_path_sum(grid: list) -> int:
    # Dynamic programming: each cell accumulates the cheapest way to reach it.
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''')
    # The first row can only be reached from the left.
    for cell_n in range(1 , len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list , row_above: list) -> list:
    # Each cell adds the cheaper of its left and upper neighbour.
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
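# A short worked example for min_path_sum above (names as reconstructed here):
# moving only right or down through [[1, 3, 1], [1, 5, 1], [4, 2, 1]], the
# cheapest route is 1 + 3 + 1 + 1 + 1 = 7, which is exactly what the
# row-by-row accumulation returns:
#
#   >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#   7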
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
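# Reference behaviour of the sieve above:
#
#   >>> prime_sieve(10)
#   [2, 3, 5, 7]
#
# Marking composites only from start * start upwards is safe because any
# smaller multiple start * k (with k < start) was already crossed out when k
# itself was processed, which keeps the classic O(n log log n) running time.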
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        # Count the primes among the three new diagonal corners of the next ring.
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
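# The loop above walks the diagonals of an Ulam spiral: for a ring of side
# j + 2, three of its corners are j*j + j + 1, j*j + 2*j + 2 and j*j + 3*j + 3
# (the range stops before the fourth corner, (j + 2) ** 2, a perfect square
# that is never prime). With the default ratio of 0.1, solution() returns the
# first side length at which fewer than 10% of all diagonal numbers are prime
# (Project Euler problem 58; the commonly cited answer is 26241).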
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'openai-gpt'
_UpperCAmelCase = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Dict , __snake_case : str=40478 , __snake_case : Any=512 , __snake_case : Tuple=768 , __snake_case : Any=12 , __snake_case : Tuple=12 , __snake_case : Union[str, Any]="gelu" , __snake_case : Dict=0.1 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Union[str, Any]=1e-5 , __snake_case : str=0.0_2 , __snake_case : Optional[int]="cls_index" , __snake_case : int=True , __snake_case : List[Any]=None , __snake_case : int=True , __snake_case : Any=0.1 , **__snake_case : Any , ):
lowerCamelCase :List[str] = vocab_size
lowerCamelCase :str = n_positions
lowerCamelCase :Any = n_embd
lowerCamelCase :str = n_layer
lowerCamelCase :Optional[Any] = n_head
lowerCamelCase :Tuple = afn
lowerCamelCase :int = resid_pdrop
lowerCamelCase :Optional[Any] = embd_pdrop
lowerCamelCase :Any = attn_pdrop
lowerCamelCase :Optional[Any] = layer_norm_epsilon
lowerCamelCase :str = initializer_range
lowerCamelCase :str = summary_type
lowerCamelCase :int = summary_use_proj
lowerCamelCase :int = summary_activation
lowerCamelCase :Optional[Any] = summary_first_dropout
lowerCamelCase :int = summary_proj_to_labels
super().__init__(**__snake_case )
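# A minimal usage sketch for the configuration above. In transformers this
# class is exposed as OpenAIGPTConfig; the attribute_map lets canonical names
# read the GPT-specific fields (e.g. config.hidden_size resolves to n_embd):
#
#   from transformers import OpenAIGPTConfig, OpenAIGPTModel
#
#   config = OpenAIGPTConfig(n_embd=768, n_layer=12, n_head=12)
#   model = OpenAIGPTModel(config)  # randomly initialised weights
#   assert config.hidden_size == config.n_embd == 768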
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
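# The tests above cover the two public streaming entry points. A minimal
# real-world sketch of TextIteratorStreamer (the model name is illustrative):
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok("Hello", return_tensors="pt")
#   streamer = TextIteratorStreamer(tok, skip_prompt=True)
#   Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
#   for chunk in streamer:  # text arrives as it is generated
#       print(chunk, end="")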
def all_unique_chars(input_str: str) -> bool:
    # Track seen characters as bits of one arbitrarily large integer.
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2 , ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
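# Worked example for the bitmask check above (function name as reconstructed
# here): for "abc" the bits at positions ord('a'), ord('b') and ord('c') are
# each set once, so all_unique_chars("abc") is True; for "aba" the second 'a'
# finds its bit already set (bitmap >> ord('a') & 1 == 1), so the result is
# False. Because Python integers are unbounded, the bitmap works for any
# Unicode code point, at the cost of pow(2, ord(ch)) building large integers.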
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Return -1 if number has an odd count of prime factors (with multiplicity), else 1."""
    if not isinstance(number , int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)
    if number < 1:
        raise ValueError('''Input must be a positive integer''')
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
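# Worked example for the Liouville function above: prime_factors counts
# factors with multiplicity, so liouville_lambda(12) sees 12 = 2 * 2 * 3
# (three factors, odd count) and returns -1, while liouville_lambda(10) sees
# 10 = 2 * 5 (two factors, even count) and returns 1.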
import torch
from diffusers import StableDiffusionPipeline
A__ = """path-to-your-trained-model"""
A__ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("""cuda""")
A__ = """A photo of sks dog in a bucket"""
A__ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
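# The slicing above splits timm's fused qkv projection into the separate
# query/key/value matrices that transformers expects. A self-contained sketch
# of the same row layout:
#
#   import torch
#
#   hidden_size = 4
#   in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32)
#   in_proj_weight = in_proj_weight.reshape(3 * hidden_size, hidden_size)
#   q = in_proj_weight[:hidden_size, :]                   # rows 0 .. h-1
#   k = in_proj_weight[hidden_size : 2 * hidden_size, :]  # rows h .. 2h-1
#   v = in_proj_weight[-hidden_size:, :]                  # rows 2h .. 3h-1
#   assert torch.equal(torch.cat([q, k, v]), in_proj_weight)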
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
    lowerCamelCase :Optional[Any] = {int(k): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
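# Example command line for the conversion script above (the file name is an
# assumption based on its contents):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base \
#       --push_to_hub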
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : Union[str, Any]):
lowerCamelCase :Union[str, Any] = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
lowerCamelCase :Dict = DetaConfig(
backbone_config=a_ , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=a_ , with_box_refine=a_ , two_stage=a_ , )
# set labels
lowerCamelCase :Dict = '''huggingface/label-files'''
if "o365" in model_name:
lowerCamelCase :int = 3_66
lowerCamelCase :str = '''object365-id2label.json'''
else:
lowerCamelCase :List[Any] = 91
lowerCamelCase :int = '''coco-detection-id2label.json'''
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Union[str, Any] = json.load(open(cached_download(hf_hub_url(a_ , a_ , repo_type='''dataset''')) , '''r'''))
    lowerCamelCase :List[Any] = {int(k): v for k, v in idalabel.items()}
lowerCamelCase :Dict = idalabel
lowerCamelCase :List[str] = {v: k for k, v in idalabel.items()}
return config
def _lowerCamelCase ( a_ : Optional[Any]):
lowerCamelCase :Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias'''))
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight'''))
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias'''))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight'''))
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias'''))
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight'''))
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias'''))
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight'''))
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias'''))
# transformer encoder
for i in range(config.encoder_layers):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias"))
# transformer decoder
for i in range(config.decoder_layers):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias"))
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Tuple , a_ : List[Any]):
lowerCamelCase :List[Any] = dct.pop(a_)
lowerCamelCase :Tuple = val
def _lowerCamelCase ( a_ : Dict , a_ : Optional[Any]):
lowerCamelCase :Tuple = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
for i in range(len(backbone_config.depths)):
lowerCamelCase :Optional[int] = num_features[i]
for j in range(backbone_config.depths[i]):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase :List[str] = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
lowerCamelCase :Dict = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Tuple = in_proj_weight[:dim, :]
lowerCamelCase :List[str] = in_proj_bias[: dim]
lowerCamelCase :Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
lowerCamelCase :Optional[int] = in_proj_bias[
dim : dim * 2
]
lowerCamelCase :Dict = in_proj_weight[
-dim :, :
]
lowerCamelCase :Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def _lowerCamelCase ( a_ : int , a_ : Optional[Any]):
# transformer decoder self-attention layers
lowerCamelCase :Union[str, Any] = config.d_model
for i in range(config.decoder_layers):
# read in weights + bias of input projection layer of self-attention
lowerCamelCase :Union[str, Any] = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
lowerCamelCase :Any = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Dict = in_proj_weight[:hidden_size, :]
lowerCamelCase :Any = in_proj_bias[:hidden_size]
lowerCamelCase :Any = in_proj_weight[
hidden_size : hidden_size * 2, :
]
lowerCamelCase :Any = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase :Optional[int] = in_proj_weight[-hidden_size:, :]
lowerCamelCase :Union[str, Any] = in_proj_bias[-hidden_size:]
def _lowerCamelCase ( ):
lowerCamelCase :Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :List[Any] = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Tuple , a_ : Tuple , a_ : Dict):
lowerCamelCase :Tuple = get_deta_config(a_)
# load original state dict
if model_name == "deta-swin-large":
lowerCamelCase :Optional[Any] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''')
elif model_name == "deta-swin-large-o365":
lowerCamelCase :Optional[Any] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''')
else:
raise ValueError(F"Model name {model_name} not supported")
lowerCamelCase :int = torch.load(a_ , map_location='''cpu''')['''model''']
# original state dict
for name, param in state_dict.items():
print(a_ , param.shape)
# rename keys
lowerCamelCase :Union[str, Any] = create_rename_keys(a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_swin_q_k_v(a_ , config.backbone_config)
read_in_decoder_q_k_v(a_ , a_)
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
lowerCamelCase :List[str] = state_dict.pop(a_)
lowerCamelCase :List[str] = val
if "input_proj" in key:
lowerCamelCase :Optional[Any] = state_dict.pop(a_)
lowerCamelCase :str = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
lowerCamelCase :Dict = state_dict.pop(a_)
lowerCamelCase :str = val
# finally, create HuggingFace model and load state dict
lowerCamelCase :Tuple = DetaForObjectDetection(a_)
model.load_state_dict(a_)
model.eval()
lowerCamelCase :Any = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(a_)
# load image processor
lowerCamelCase :Tuple = DetaImageProcessor(format='''coco_detection''')
# verify our conversion on image
lowerCamelCase :int = prepare_img()
lowerCamelCase :List[str] = processor(images=a_ , return_tensors='''pt''')
lowerCamelCase :int = encoding['''pixel_values''']
lowerCamelCase :List[str] = model(pixel_values.to(a_))
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3])
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3])
if model_name == "deta-swin-large":
lowerCamelCase :str = torch.tensor(
[[-7.6_308, -2.8_485, -5.3_737], [-7.2_037, -4.5_505, -4.8_027], [-7.2_943, -4.2_611, -4.6_617]])
lowerCamelCase :Optional[int] = torch.tensor([[0.4_987, 0.4_969, 0.9_999], [0.2_549, 0.5_498, 0.4_805], [0.5_498, 0.2_757, 0.0_569]])
elif model_name == "deta-swin-large-o365":
lowerCamelCase :Optional[int] = torch.tensor(
[[-8.0_122, -3.5_720, -4.9_717], [-8.1_547, -3.6_886, -4.6_389], [-7.6_610, -3.6_194, -5.0_134]])
lowerCamelCase :List[Any] = torch.tensor([[0.2_523, 0.5_549, 0.4_881], [0.7_715, 0.4_149, 0.4_601], [0.5_503, 0.2_753, 0.0_575]])
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(a_) , atol=1e-4)
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(a_) , atol=1e-4)
print('''Everything ok!''')
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
Path(a_).mkdir(exist_ok=a_)
model.save_pretrained(a_)
processor.save_pretrained(a_)
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''')
model.push_to_hub(F"jozhang97/{model_name}")
processor.push_to_hub(F"jozhang97/{model_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
A__ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
|
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'{solution() = }')
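# Sanity check for the solution above: the even Fibonacci numbers below four
# million are 2, 8, 34, 144, ... (every third Fibonacci number is even), and
# their sum is 4613732, the well-known answer to Project Euler problem 2.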
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
A__ = logging.get_logger(__name__)
A__ = TypeVar("""DatasetType""", Dataset, IterableDataset)
def _lowerCamelCase ( a_ : List[DatasetType] , a_ : Optional[List[float]] = None , a_ : Optional[int] = None , a_ : Optional[DatasetInfo] = None , a_ : Optional[NamedSplit] = None , a_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''')
for i, dataset in enumerate(a_):
if not isinstance(a_ , (Dataset, IterableDataset)):
if isinstance(a_ , (DatasetDict, IterableDatasetDict)):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''')
raise ValueError(
F"Dataset at position {i} has at least one split: {list(a_)}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(a_))}']")
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(a_).__name__}.")
if i == 0:
lowerCamelCase , lowerCamelCase :Tuple = (
(Dataset, IterableDataset) if isinstance(a_ , a_) else (IterableDataset, Dataset)
)
elif not isinstance(a_ , a_):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
if dataset_type is Dataset:
return _interleave_map_style_datasets(
a_ , a_ , a_ , info=a_ , split=a_ , stopping_strategy=a_)
else:
return _interleave_iterable_datasets(
a_ , a_ , a_ , info=a_ , split=a_ , stopping_strategy=a_)
def _lowerCamelCase ( a_ : List[DatasetType] , a_ : Optional[DatasetInfo] = None , a_ : Optional[NamedSplit] = None , a_ : int = 0 , ):
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''')
for i, dataset in enumerate(a_):
if not isinstance(a_ , (Dataset, IterableDataset)):
if isinstance(a_ , (DatasetDict, IterableDatasetDict)):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''')
raise ValueError(
F"Dataset at position {i} has at least one split: {list(a_)}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(a_))}']")
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(a_).__name__}.")
if i == 0:
lowerCamelCase , lowerCamelCase :Dict = (
(Dataset, IterableDataset) if isinstance(a_ , a_) else (IterableDataset, Dataset)
)
elif not isinstance(a_ , a_):
            raise ValueError(
                F"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
if dataset_type is Dataset:
return _concatenate_map_style_datasets(a_ , info=a_ , split=a_ , axis=a_)
else:
return _concatenate_iterable_datasets(a_ , info=a_ , split=a_ , axis=a_)
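# A minimal usage sketch of the two public helpers above, as exposed by the
# datasets library:
#
#   from datasets import Dataset, concatenate_datasets, interleave_datasets
#
#   d1 = Dataset.from_dict({"x": [0, 1, 2]})
#   d2 = Dataset.from_dict({"x": [10, 11, 12]})
#   concatenate_datasets([d1, d2])["x"]  # [0, 1, 2, 10, 11, 12]
#   interleave_datasets([d1, d2])["x"]   # [0, 10, 1, 11, 2, 12] (alternates rows)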
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def abbr(a: str , b: str) -> bool:
    # dp[i][j]: can the first i chars of a be abbreviated to the first j chars of b?
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
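# Worked example for the abbreviation check above (names as reconstructed
# here): abbr("daBcd", "ABC") is True, since the leading 'd' can be deleted,
# 'a' upper-cased to 'A', 'B' kept, 'c' upper-cased to 'C' and the trailing
# 'd' deleted; abbr("dBcd", "ABC") is False because no 'a' is available to
# produce the required 'A'.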
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['image_processor', 'tokenizer']
_UpperCAmelCase = 'CLIPImageProcessor'
_UpperCAmelCase = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] , __snake_case : Union[str, Any]=None , __snake_case : Tuple=None , **__snake_case : Dict ):
lowerCamelCase :int = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
lowerCamelCase :Dict = kwargs.pop('''feature_extractor''' )
lowerCamelCase :Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : Tuple , __snake_case : str=None , __snake_case : Dict=None , __snake_case : int=None , **__snake_case : str ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowerCamelCase :Dict = self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if images is not None:
lowerCamelCase :Union[str, Any] = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None and images is not None:
lowerCamelCase :Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def snake_case ( self : int , *__snake_case : int , **__snake_case : List[Any] ):
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def snake_case ( self : Optional[int] , *__snake_case : int , **__snake_case : Any ):
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def snake_case ( self : int ):
lowerCamelCase :int = self.tokenizer.model_input_names
lowerCamelCase :Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def snake_case ( self : List[str] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def snake_case ( self : Dict ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
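# A minimal usage sketch of the processor above, via the public transformers
# API:
#
#   from PIL import Image
#   from transformers import CLIPProcessor
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   image = Image.new("RGB", (224, 224))
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   sorted(batch.keys())  # ['attention_mask', 'input_ids', 'pixel_values']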
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
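# Quick sanity check of the sequence-length bookkeeping above, using the tester
# defaults (illustrative arithmetic only):
#   num_patches = (30 // 2) * (30 // 2) = 225
#   expected_seq_len = 225 + 1 (CLS token) + 10 (detection tokens) = 236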
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = YolosModelTester(self )
lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            out_len = len(outputs)
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
            added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
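# End-to-end inference sketch mirroring the integration test above (requires
# network access to the `hustvl/yolos-small` checkpoint; the threshold is an
# example value):
#
#   image = prepare_img()
#   image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
#   model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
#   outputs = model(**image_processor(images=image, return_tensors="pt"))
#   results = image_processor.post_process_object_detection(
#       outputs, threshold=0.9, target_sizes=[image.size[::-1]])[0]
#   # results["scores"], results["labels"], results["boxes"]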
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : int ):
lowerCamelCase :Optional[int] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Optional[Any] = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
lowerCamelCase :List[Any] = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
lowerCamelCase :int = tempfile.mkdtemp()
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Tuple = os.path.join(self.tmpdirname , __snake_case )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
# load decoder from hub
lowerCamelCase :Dict = '''hf-internal-testing/ngram-beam-search-decoder'''
def snake_case ( self : Optional[int] , **__snake_case : Any ):
lowerCamelCase :Dict = self.add_kwargs_tokens_map.copy()
kwargs.update(__snake_case )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[int] , **__snake_case : List[Any] ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Union[str, Any] , **__snake_case : List[Any] ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__snake_case )
def snake_case ( self : Any ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Dict = self.get_tokenizer()
lowerCamelCase :Tuple = self.get_feature_extractor()
lowerCamelCase :Optional[int] = self.get_decoder()
lowerCamelCase :str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase :Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCamelCase :Any = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Optional[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__snake_case , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case ( self : str ):
lowerCamelCase :Optional[Any] = self.get_feature_extractor()
lowerCamelCase :List[Any] = self.get_tokenizer()
lowerCamelCase :Any = self.get_decoder()
lowerCamelCase :Any = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Tuple = floats_list((3, 1000) )
lowerCamelCase :Union[str, Any] = feature_extractor(__snake_case , return_tensors='''np''' )
lowerCamelCase :Optional[Any] = processor(__snake_case , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = self.get_feature_extractor()
lowerCamelCase :List[Any] = self.get_tokenizer()
lowerCamelCase :Optional[Any] = self.get_decoder()
lowerCamelCase :List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Union[str, Any] = '''This is a test string'''
lowerCamelCase :Optional[int] = processor(text=__snake_case )
lowerCamelCase :Dict = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Dict , __snake_case : List[str]=(2, 10, 16) , __snake_case : Tuple=77 ):
np.random.seed(__snake_case )
return np.random.rand(*__snake_case )
def snake_case ( self : Any ):
lowerCamelCase :Any = self.get_feature_extractor()
lowerCamelCase :int = self.get_tokenizer()
lowerCamelCase :int = self.get_decoder()
lowerCamelCase :int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Dict = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowerCamelCase :Optional[Any] = processor.decode(__snake_case )
lowerCamelCase :Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def snake_case ( self : Any , __snake_case : str ):
lowerCamelCase :str = self.get_feature_extractor()
lowerCamelCase :str = self.get_tokenizer()
lowerCamelCase :Any = self.get_decoder()
lowerCamelCase :Any = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Tuple = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCamelCase :int = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
lowerCamelCase :List[str] = processor.batch_decode(__snake_case , __snake_case )
lowerCamelCase :List[Any] = list(__snake_case )
with get_context('''fork''' ).Pool() as p:
lowerCamelCase :int = decoder.decode_beams_batch(__snake_case , __snake_case )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
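        # The same pattern as user-facing code (a sketch; the checkpoint path is
        # a placeholder). The constraint tested above: create the processor
        # *before* the pool so forked workers inherit the loaded language model.
        #
        #   processor = WavaVecaProcessorWithLM.from_pretrained("path/to/processor")
        #   with get_context("fork").Pool() as pool:
        #       transcriptions = processor.batch_decode(logits, pool).text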
def snake_case ( self : Dict ):
lowerCamelCase :str = self.get_feature_extractor()
lowerCamelCase :int = self.get_tokenizer()
lowerCamelCase :List[Any] = self.get_decoder()
lowerCamelCase :str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Dict = self._get_dummy_logits()
lowerCamelCase :Optional[int] = 15
lowerCamelCase :Optional[Any] = -2_0.0
lowerCamelCase :int = -4.0
lowerCamelCase :int = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
lowerCamelCase :List[str] = decoded_processor_out.text
lowerCamelCase :List[str] = list(__snake_case )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase :List[Any] = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
lowerCamelCase :Dict = [d[0][0] for d in decoded_decoder_out]
lowerCamelCase :Optional[int] = [d[0][2] for d in decoded_decoder_out]
lowerCamelCase :Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , __snake_case , atol=1e-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , __snake_case , atol=1e-3 ) )
def snake_case ( self : Dict ):
lowerCamelCase :List[str] = self.get_feature_extractor()
lowerCamelCase :List[Any] = self.get_tokenizer()
lowerCamelCase :List[str] = self.get_decoder()
lowerCamelCase :str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Dict = self._get_dummy_logits()
lowerCamelCase :str = 2.0
lowerCamelCase :Any = 5.0
lowerCamelCase :Optional[Any] = -2_0.0
lowerCamelCase :List[Any] = True
lowerCamelCase :List[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
lowerCamelCase :Dict = decoded_processor_out.text
lowerCamelCase :List[Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase :Dict = decoder.decode_beams_batch(
__snake_case , __snake_case , )
lowerCamelCase :Optional[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __snake_case )
lowerCamelCase :List[Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase :Tuple = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase :Any = os.listdir(__snake_case )
lowerCamelCase :Tuple = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :Tuple = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
lowerCamelCase :Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase :Union[str, Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase :List[Any] = os.listdir(__snake_case )
lowerCamelCase :Any = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__snake_case , __snake_case )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Tuple = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :List[Any] = floats_list((3, 1000) )
lowerCamelCase :Optional[Any] = processor_wavaveca(__snake_case , return_tensors='''np''' )
lowerCamelCase :int = processor_auto(__snake_case , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
lowerCamelCase :Union[str, Any] = self._get_dummy_logits()
lowerCamelCase :Optional[Any] = processor_wavaveca.batch_decode(__snake_case )
lowerCamelCase :Optional[Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case ( self : List[str] ):
lowerCamelCase :Optional[int] = self.get_feature_extractor()
lowerCamelCase :int = self.get_tokenizer()
lowerCamelCase :Dict = self.get_decoder()
lowerCamelCase :str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def snake_case ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :List[str] = self._get_dummy_logits()[0]
lowerCamelCase :Any = processor.decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def snake_case ( self : List[str] ):
lowerCamelCase :List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :Optional[int] = self._get_dummy_logits()
lowerCamelCase :List[str] = processor.batch_decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__snake_case , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case ( self : Dict ):
import torch
        ds = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=True )
        ds = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) )
        ds_iter = iter(ds)
        sample = next(ds_iter)
        processor = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        model = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
        with torch.no_grad():
            logits = model(input_values ).logits.cpu().numpy()
        output = processor.decode(logits[0] , output_word_offsets=True )
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCamelCase :Tuple = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowerCamelCase :Dict = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__snake_case , '''word''' ) ) , __snake_case )
self.assertEqual(''' '''.join(self.get_from_offsets(__snake_case , '''word''' ) ) , output.text )
# output times
lowerCamelCase :List[Any] = torch.tensor(self.get_from_offsets(__snake_case , '''start_time''' ) )
lowerCamelCase :Any = torch.tensor(self.get_from_offsets(__snake_case , '''end_time''' ) )
# fmt: off
lowerCamelCase :Union[str, Any] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
lowerCamelCase :Optional[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.0_1 ) )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.0_1 ) )
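        # Background note (not asserted above): word offsets count logit frames,
        # and `inputs_to_logits_ratio / sampling_rate` converts frames to seconds.
        # For the wav2vec2 base architecture the ratio is 320, so at 16 kHz each
        # frame spans 320 / 16000 = 0.02 s.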
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
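        # The subprocess above is equivalent to running (sketch):
        #   torchrun --nproc_per_node=<gpu_count> .../test_utils/scripts/external_deps/test_metrics.py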
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
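# Worked example: total_frequency_distribution(sides_number=4, dice_number=2)
# returns [0, 0, 1, 2, 3, 4, 3, 2, 1] -- the 16 ordered outcomes of two
# 4-sided dice grouped by total (list index = total).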
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'{solution() = }')
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            self.dir_cache = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
            def __next__(self):
                return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
        self.file.__enter__ = fixed_enter
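# Usage sketch once these filesystems are registered with fsspec (the local
# archive path is a placeholder; the chained URL form matches the class
# comments above):
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::./file.txt.gz", mode="rb") as f:
#       data = f.read()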
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            '''Expected the same number of rows for A and B. '''
            F"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            '''Expected the same number of columns for B and C. '''
            F"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''')
    return mat_c - mat_b.T @ a_inv @ mat_b
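# For the block matrix M = [[A, B], [B^T, C]], the function returns the Schur
# complement S = C - B^T A^{-1} B; the determinant identity
# det(M) = det(A) * det(S) is what the unit tests below check numerically.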
class _lowerCAmelCase ( unittest.TestCase ):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
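# Loading sketch matching the cached properties above (network access assumed):
#
#   tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   batch = tokenizer(["A long paragraph for summarization."], return_tensors="pt")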
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector , vector)
class SVC:
    def __init__( self , *,
        regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('''rbf kernel requires gamma''' )
            if not isinstance(self.gamma , (float, int) ):
                raise ValueError('''gamma must be float or int''' )
            if not self.gamma > 0:
                raise ValueError('''gamma must be > 0''' )
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = F"Unknown kernel: {kernel}"
            raise ValueError(msg )
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1 , vector2 )

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2 )) )
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
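        # Concrete two-sample instance of the dual (illustrative): with labels
        # y1 = +1 and y2 = -1, maximize
        #   l1 + l2 - 1/2 * (l1^2 k(x1,x1) - 2*l1*l2*k(x1,x2) + l2^2 k(x2,x2))
        # subject to l1 - l2 = 0 and 0 <= l1, l2 <= C.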
        (n,) = np.shape(classes )

        def to_minimize(candidate: ndarray ) -> float:
            s = 0
            (n,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
    """processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
    """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_layoutlmv2_fast"""] = ["""LayoutLMv2TokenizerFast"""]
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""feature_extraction_layoutlmv2"""] = ["""LayoutLMv2FeatureExtractor"""]
    _import_structure["""image_processing_layoutlmv2"""] = ["""LayoutLMv2ImageProcessor"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_layoutlmv2"""] = [
        """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LayoutLMv2ForQuestionAnswering""",
        """LayoutLMv2ForSequenceClassification""",
        """LayoutLMv2ForTokenClassification""",
        """LayoutLMv2Layer""",
        """LayoutLMv2Model""",
        """LayoutLMv2PreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
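# The _LazyModule indirection defers the heavy torch/vision imports until first
# attribute access, e.g. (sketch):
#
#   from transformers.models.layoutlmv2 import LayoutLMv2Config  # resolved lazily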
import numpy as np
import datasets
A__ = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
A__ = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
A__ = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
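# The steps above compute the squared Mahalanobis distance
#   D^2(x) = (x - mu)^T Sigma^{-1} (x - mu)
# with Sigma estimated from the reference distribution and np.linalg.pinv used
# as a fallback when the covariance matrix is singular.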
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
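

# For orientation, a minimal (illustrative, not executed here) use of the
# pipeline exercised by the tests above:
#
#     classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier(image, candidate_labels=["cat", "plane", "remote"])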
| 49
| 1
|
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares
    of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
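    # Cross-check of the two closed forms against the direct sums (small n assumed):
    assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11))  # 2640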
| 49
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x onto the stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 49
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
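

# Quick illustrative check of the derived properties (the values shown are the
# stock "small" layout, not a training recommendation):
if __name__ == "__main__":
    config = FunnelConfig(block_sizes=[4, 4, 4])
    print(config.num_blocks)  # 3
    print(config.num_hidden_layers)  # 12, i.e. sum(block_sizes)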
| 49
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate (the fairseq modules are fc1/fc2)
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

    # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
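

# Example invocation (illustrative; the script filename is an assumption):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       --classification_head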
| 49
| 1
|
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
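    # Illustrative sanity checks:
    assert power(2, 10) == 1024
    assert power(5, 0) == 1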
| 49
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
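

# Minimal illustrative instantiation; every hyperparameter falls back to the
# defaults defined above:
if __name__ == "__main__":
    config = RobertaPreLayerNormConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)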
| 49
| 1
|
def abbr(a: str, b: str) -> bool:
    """Return True if string `a` can be turned into `b` by capitalizing some
    of its lowercase letters and deleting all remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
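    # Illustrative checks of the recurrence (example strings are assumptions):
    assert abbr("daBcd", "ABC") is True  # capitalize 'a' and 'c', keep 'B', drop the 'd's
    assert abbr("dBcd", "ABC") is False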
| 49
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 49
| 1
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 49
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps epoch\n            --output_dir {self.tmpdir}\n            ".split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))
    def test_checkpointing_by_steps(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps 1\n            --output_dir {self.tmpdir}\n            ".split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))
    def test_load_states_by_epoch(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}\n            ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)
    def test_load_states_by_steps(self):
        testargs = f"\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}\n            ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
@slow
    def test_cross_validation(self):
        testargs = """
            examples/by_feature/cross_validation.py
            --num_folds 2
            """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)
    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"\n                examples/by_feature/tracking.py\n                --with_tracking\n                --project_dir {tmpdir}\n                ".split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))
    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 49
| 1
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )
torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )
# regular denoising components
torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )
torch.manual_seed(0 )
        vae = AutoencoderKL()

        components = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 49
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 49
|
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the `base,exponent` pair with the
    greatest numerical value (Project Euler problem 99)."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
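    # Why logarithms: 632382**518061 has roughly three million digits, but its
    # base-10 logarithm is an ordinary float, so comparisons stay cheap:
    print(518061 * log10(632382))  # ~3.0e6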
| 49
| 1
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
A__ = parser.parse_args()
main(args)
| 49
|
def min_path_sum(grid: list) -> int:
    """Return the minimum sum obtainable on any top-left to bottom-right
    path that only moves right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
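# --- Added usage sketch (assumes the restored names above) ---
# min_path_sum mutates the grid in place, so pass a copy when the original
# values are still needed.
if __name__ == "__main__":
    sample = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    print(min_path_sum([row[:] for row in sample]))  # 7, via 1 -> 3 -> 1 -> 1 -> 1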
| 49
| 1
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
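# --- Added sanity check ---
# All three counting strategies above must agree; on the first hand-written
# test grid the expected count of negative numbers is 8.
if __name__ == "__main__":
    _demo = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert (
        count_negatives_binary_search(_demo)
        == count_negatives_brute_force(_demo)
        == count_negatives_brute_force_with_break(_demo)
        == 8
    )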
| 49
|
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
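# --- Added illustration ---
# The corners of the spiral ring with odd side length s are s * s - k * (s - 1)
# for k in 0..3, and s * s itself is never prime; printing the running ratio
# for a few rings shows why the loop in solution() terminates.
if __name__ == "__main__":
    primes, total = 0, 1  # the centre value 1 lies on both diagonals
    for s in range(3, 101, 2):
        primes += sum(is_prime(s * s - k * (s - 1)) for k in range(1, 4))
        total += 4
        if s in (7, 25, 99):
            print(f"side {s}: {primes}/{total} primes on the diagonals")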
| 49
| 1
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda a_: None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda a_: None),
})
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
header_html = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
    answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
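# --- Added illustration (kept as a comment so it does not execute inside the
# Streamlit app, which runs this module top to bottom) ---
# The dense retriever above is max-inner-product search over a flat faiss
# index, the same IndexFlatIP used for the wiki40b passages. A toy round trip
# on unit-normalised random vectors would look like this:
#
# import faiss
# import numpy as np
#
# dim = 128
# reps = np.random.rand(1000, dim).astype("float32")
# reps /= np.linalg.norm(reps, axis=1, keepdims=True)
# index = faiss.IndexFlatIP(dim)
# index.add(reps)
# scores, ids = index.search(reps[42:43], 5)  # a vector retrieves itself first
# assert ids[0][0] == 42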
| 49
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
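# --- Added illustration ---
# TextIteratorStreamer is essentially a thread-safe queue between the
# generation thread and the consuming loop; the same pattern with stdlib
# pieces only (this stand-in is for illustration, not the streamer API):
if __name__ == "__main__":
    from queue import Queue
    from threading import Thread as _Thread

    def _produce(q):
        for token in ("Hello", " ", "world"):
            q.put(token)
        q.put(None)  # sentinel: generation finished

    _q = Queue()
    _Thread(target=_produce, args=(_q,)).start()
    _text = ""
    while (_chunk := _q.get()) is not None:
        _text += _chunk
    assert _text == "Hello world"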
| 49
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)
    return results
if __name__ == "__main__":
main()
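# --- Added illustration (defined but never called) ---
# get_tfds() leans on tf.data.Dataset.from_generator with an explicit
# (features, label) output signature; the same pattern in miniature, with
# hypothetical toy data:
def _toy_dataset_demo():
    import tensorflow as tf

    def gen():
        yield {"input_ids": [101, 2054, 102]}, 1
        yield {"input_ids": [101, 2129, 102]}, 0

    ds = tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32}, tf.int64),
        ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
    )
    return list(ds.as_numpy_iterator())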
| 49
|
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
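# --- Added self-contained check ---
# maths.prime_factors may not be importable outside its repository; a tiny
# trial-division stand-in is enough to sanity-check the Liouville function.
def _toy_prime_factors(n):
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    assert (-1 if len(_toy_prime_factors(10)) % 2 else 1) == 1  # 10 = 2 * 5
    assert (-1 if len(_toy_prime_factors(12)) % 2 else 1) == -1  # 12 = 2 * 2 * 3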
| 49
| 1
|
from math import ceil
def solution(n: int = 1001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
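# --- Added cross-check ---
# The closed form can be verified by summing the four corner values of each
# ring directly; for a 5 x 5 spiral the known diagonal sum is 101.
def _diagonal_sum_brute(n):
    total = 1
    for s in range(3, n + 1, 2):
        total += sum(s * s - k * (s - 1) for k in range(4))
    return total


if __name__ == "__main__":
    assert _diagonal_sum_brute(5) == 101
    assert _diagonal_sum_brute(1001) == solution(1001)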
| 49
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
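# --- Added illustration ---
# read_in_q_k_v() slices timm's fused qkv projection into separate
# query/key/value weights; the same slicing on a toy tensor (hidden size 4,
# so the fused weight is 12 x 4). `_split_qkv` is a hypothetical helper for
# this sketch only.
import torch as _torch


def _split_qkv(qkv_weight, hidden_size):
    q = qkv_weight[:hidden_size, :]
    k = qkv_weight[hidden_size : hidden_size * 2, :]
    v = qkv_weight[-hidden_size:, :]
    return q, k, v


if __name__ == "__main__":
    _w = _torch.arange(48.0).reshape(12, 4)
    _q, _k, _v = _split_qkv(_w, 4)
    assert _q.shape == _k.shape == _v.shape == (4, 4)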
| 49
| 1
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
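# --- Added usage sketch (property values are rough textbook figures) ---
# Water near 20 degrees C: density ~998 kg/m^3, bulk modulus ~2.15e9 Pa,
# giving roughly 1468 m/s, close to the measured speed of sound in water.
if __name__ == "__main__":
    print(f"{speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9):.0f} m/s")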
| 49
|
def solution(n: int = 4000000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'{solution() = }')
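# --- Added cross-check ---
# Only every third Fibonacci number is even, and the even terms satisfy
# E(k) = 4 * E(k - 1) + E(k - 2); summing them directly must match the
# filtering approach in solution().
if __name__ == "__main__":
    a, b = 2, 8
    even_sum = 0
    while a <= 4000000:
        even_sum += a
        a, b = b, 4 * b + a
    assert even_sum == solution(4000000)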
| 49
| 1
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _lowerCAmelCase :
pass
| 49
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 49
| 1
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
| 49
|
import numpy
class _lowerCAmelCase :
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3
        )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
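# --- Added numerical check ---
# sigmoid_derivative() expects the *activated* value, i.e. it computes
# s * (1 - s) for s = sigmoid(x); a finite-difference check on a grid of
# inputs confirms the identity.
if __name__ == "__main__":
    xs = numpy.linspace(-3, 3, 7)
    eps = 1e-6
    numeric = (sigmoid(xs + eps) - sigmoid(xs - eps)) / (2 * eps)
    analytic = sigmoid_derivative(sigmoid(xs))
    assert numpy.allclose(numeric, analytic, atol=1e-6)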
| 49
| 1
|
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
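# --- Added usage sketch ---
# Scanning small odd primes p with the test recovers the classic Mersenne
# prime exponents 3, 5, 7, 13, 17, 19 and 31.
if __name__ == "__main__":
    exponents = [p for p in (3, 5, 7, 11, 13, 17, 19, 23, 29, 31) if lucas_lehmer_test(p)]
    assert exponents == [3, 5, 7, 13, 17, 19, 31]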
| 49
|
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
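# --- Added usage sketch ---
# abbr(a, b) asks whether uppercasing some lowercase letters of `a` and
# deleting the remaining lowercase letters can produce `b`.
if __name__ == "__main__":
    assert abbr("daBcd", "ABC") is True  # capitalise a and c, drop both d's
    assert abbr("dBcd", "ABC") is False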
| 49
| 1
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
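# --- Added sanity check ---
# 220 and 284 form the smallest amicable pair, so sum_of_divisors must map
# each one to the other.
if __name__ == "__main__":
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220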
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 49
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for object detection head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)
        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
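A standalone sketch of the inference flow the integration test above exercises (model weights are downloaded on first use; the image path is the test fixture assumed above):

from transformers import AutoImageProcessor, YolosForObjectDetection
from PIL import Image

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)
# keep detections above 0.3 confidence, rescaled to the original image size
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
for score, label in zip(results["scores"], results["labels"]):
    print(model.config.id2label[label.item()], round(score.item(), 3))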
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def main():
    pass


if __name__ == "__main__":
    import doctest
    doctest.testmod()
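A small usage sketch of the list above (values chosen here for illustration): build a list, query membership, then delete an interior node.

linked_list = LinkedList()
for value in (1, 2, 3):
    linked_list.insert(value)   # appends: set_head for the first, set_tail afterwards
print(linked_list)              # "1 2 3"
print(2 in linked_list)         # True
linked_list.delete_value(2)     # rewires the neighbours of the node holding 2
print(linked_list)              # "1 3"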
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression = None  # compression type in fsspec. ex: "gzip"
    extension = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs)
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
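A small usage sketch under stated assumptions ("data.txt.gz" is a hypothetical local gzip file): each filesystem exposes the archive as a single member whose name is the file name minus the extension.

fs = GzipFileSystem(fo="data.txt.gz")  # hypothetical local gzip archive
with fs.open("data.txt") as f:         # member name = "data.txt.gz" minus ".gz"
    print(f.read())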
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
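Each of the n phases compares disjoint index pairs (all even pairs, then all odd pairs), so a phase can run fully in parallel; n alternating phases are enough to sort any input. A quick randomized sanity check (a sketch added here, not part of the original file):

import random

data = random.sample(range(100), 10)
expected = sorted(data)
assert odd_even_transposition(data) == expected  # sorts in place and returns arr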
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
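A minimal usage sketch (requires network access to fetch the vocabulary; the expected shape follows the test_prepare_batch expectation above):

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
batch = tokenizer(["A long paragraph for summarization."], return_tensors="pt")
print(batch.input_ids.shape)  # torch.Size([1, 9]): <s> + 7 subword ids + </s>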
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LayoutLMv2ForQuestionAnswering""",
        """LayoutLMv2ForSequenceClassification""",
        """LayoutLMv2ForTokenClassification""",
        """LayoutLMv2Layer""",
        """LayoutLMv2Model""",
        """LayoutLMv2PreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"])
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias'''))
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight'''))
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias'''))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight"))
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight"))
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias"))
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight"))
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias"))
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight'''))
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight'''))
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias'''))
for source_index, target_index in zip(range(3 , 0 , -1) , range(0 , 3)):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight"))
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight"))
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias"))
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight"))
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight"))
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias"))
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight'''))
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias'''))
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight"))
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias"))
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight"))
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias"))
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight"))
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias"))
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight"))
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias"))
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight"))
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias"))
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight"))
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias"))
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight"))
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias"))
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight'''))
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias'''))
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight'''))
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight'''))
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias'''))
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight'''))
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias'''))
for i in range(3):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight"))
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias"))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
A__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
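A hypothetical invocation of the script (the paths below are placeholders, not values from the original):

# python convert_maskformer_swin_to_pytorch.py \
#     --model_name maskformer-swin-tiny-ade \
#     --checkpoint_path /path/to/model.pkl \
#     --pytorch_dump_folder_path ./maskformer-swin-tiny-ade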
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
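A minimal usage sketch of the pipeline the tests above exercise (model weights are downloaded on first use; the image URL is illustrative):

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "plane", "remote"],
)
print(predictions)  # list of {"score": ..., "label": ...} sorted by score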