import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the span predictions for the extractive Q&A model, best first."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Find the best answer spans for one passage, keeping only non-overlapping intervals."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
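# Usage sketch (illustrative, adapted from the public DPR model docs; the
# checkpoint name and the `DPRReader` model class are not part of this file):
#
#   from transformers import DPRReader, DPRReaderTokenizerFast
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love ?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#   print(best_spans[0].text)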
# --------------------------------- next source file ---------------------------------
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
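# Minimal demonstration (added for illustration, not part of the original file):
# trim_batch drops the trailing columns that contain only padding.
#
#   batch = torch.tensor([[5, 7, 0, 0], [6, 0, 0, 0]])  # pad_token_id = 0
#   trim_batch(batch, pad_token_id=0)                   # -> tensor([[5, 7], [6, 0]])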
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
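# Usage sketch (assumed data layout, not from the original file): the dataset
# expects `<data_dir>/<type_path>.source` and `<data_dir>/<type_path>.target`
# with one example per line, and `collate_fn` is handed to a DataLoader.
#
#   from torch.utils.data import DataLoader
#   dataset = Seq2SeqDataset(tokenizer, "data/nq", max_source_length=128, max_target_length=32)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.collate_fn)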
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
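# Example (added for illustration; verifiable by running the function):
# normalization lowercases, strips punctuation and articles, and collapses
# whitespace.
#
#   normalize_answer("The  Quick, Brown Fox!")  # -> "quick brown fox"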
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
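# Worked example (added for illustration): prediction "new york city" vs.
# ground truth "york city" shares 2 tokens, so precision = 2/3, recall = 2/2,
# and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.
#
#   f1_score("new york city", "york city")  # -> 0.8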
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
# --------------------------------- next source file ---------------------------------
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """Pad input values or input features up to `max_length` or to the longest sequence in the batch."""
        # If we have a list of dicts, convert it to a dict of lists so this method can be used as a collate_fn
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
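    # Usage sketch (illustrative): `pad` is normally reached through a concrete
    # subclass such as Wav2Vec2FeatureExtractor; the checkpoint name below is an
    # assumption, not part of this file.
    #
    #   from transformers import Wav2Vec2FeatureExtractor
    #   extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
    #   batch = extractor([[0.1, 0.2, 0.3], [0.4]], sampling_rate=16000, padding=True, return_tensors="np")
    #   batch["input_values"].shape  # -> (2, 3): the shorter example was padded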
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad a single set of inputs following the given strategy."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        """Truncate a single set of inputs to `max_length`."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        """Find the correct padding strategy."""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
# --------------------------------- next source file ---------------------------------
def add(first: int, second: int) -> int:
    """
    Add two non-negative integers using only bitwise operators.

    The XOR gives the bitwise sum without carries; the AND shifted left by one
    gives the carries. Repeat until there is no carry left.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    >>> add(0, 7)
    7
    """
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
# --------------------------------- next source file ---------------------------------
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
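# Typical calling sequence (sketch inferred from the function names in this
# module; the argparse wiring below is an assumption, not part of the file):
#
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args()
#   set_default_quantizers(args)        # must run before the model is built
#   model = ...                         # build the model with quantized layers
#   enable_calibration(model)
#   ...                                 # run a few forward passes on calibration data
#   finish_calibration(model, args)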
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where the QKV projections use a single GEMM.
    Force the weight and output scale factors to match by taking the max of (Q, K, V).
    """

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized.
    Implemented by adjusting the amax of the following input quantizer.
    """
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand a per-channel quantizer's amax by replicating it across the weight's output channels."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        line = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(line) <= line_width:
            logger.info(line)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
# --------------------------------- next source file ---------------------------------
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function`, starting from `starting_point`, by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 - 5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
# --------------------------------- next source file ---------------------------------
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
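# Illustrative example (added, not in the original file) of a multi-line call
# that the regex branch above matches, using a hypothetical attribute name:
#
#   getattr(
#       self.config,
#       "hidden_size",
#       None,
#   )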
def check_config_attributes_being_used(config_class):
    """Check whether the arguments in `__init__` of `config_class` are used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check whether the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
# --------------------------------- next source file ---------------------------------
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# NOTE: the original class name was lost in the variable-renaming pass of this
# dump; `ImageProcessor` below is a stand-in name. The body matches the usual
# transformers image-processor layout.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image, keeping the aspect ratio: the shorter edge is resized to `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. `1 / 255`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with a given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None) -> List:
        """Convert model logits into per-pixel class-id maps, optionally resized to `target_sizes`."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
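# A minimal, self-contained sketch of what the post-processing above does for
# a single image; the class count and sizes here are illustrative assumptions,
# not values from the model.
import torch

logits = torch.randn(19, 128, 128)  # (num_classes, H, W) logits from a segmentation model
resized = torch.nn.functional.interpolate(
    logits.unsqueeze(0), size=(512, 512), mode='bilinear', align_corners=False)
seg_map = resized[0].argmax(dim=0)  # (512, 512) map of per-pixel class ids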
| 639
| 0
|
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    '''Compute the rank of `matrix` in place via Gaussian elimination.'''
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                # No pivot in this column: fold the last column in and shrink the rank
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row and retry: decrementing the loop variable of a
            # `for` loop has no effect in Python, hence the `while` loop above.
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
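    # Hand-worked example for the fixed rank_of_matrix above: the second row is
    # twice the first, so only two rows are independent.
    example = [[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 0.0, 1.0]]
    print(rank_of_matrix([row[:] for row in example]))  # -> 2 (pass a copy: the function mutates its input)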
| 704
|
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
    def test_base_case(self) -> None:
        """knapsack() returns 0 when there is no capacity or no value to gain."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self) -> None:
        """A small instance where the two lightest items fit together."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self) -> None:
        """The classic 60/100/120 example: the best choice is 100 + 120 = 220."""
        cap = 50
        val = [60, 1_00, 1_20]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 2_20)
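# For reference, a minimal 0/1-knapsack recursion consistent with the
# k.knapsack(cap, w, val, counter) calls above; this is a sketch, not the
# actual `knapsack` module under test.
def knapsack_sketch(cap, w, val, counter):
    if counter == 0 or cap == 0:
        return 0
    if w[counter - 1] > cap:
        # The item does not fit: skip it.
        return knapsack_sketch(cap, w, val, counter - 1)
    # Best of taking the item versus leaving it.
    return max(
        val[counter - 1] + knapsack_sketch(cap - w[counter - 1], w, val, counter - 1),
        knapsack_sketch(cap, w, val, counter - 1),
    )

# knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3) == 220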
if __name__ == "__main__":
unittest.main()
| 639
| 0
|
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    '''Post a plain-text message to a Slack incoming webhook.'''
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            F"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 705
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    '''Flip two qubits with X gates, measure them, and return the shot counts.'''
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
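    # With both qubits flipped by the X gates, every one of the 1_000 shots
    # should land in the |11> state, so the expected output is
    # "Total count for various states are: {'11': 1000}".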
| 639
| 0
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
UpperCAmelCase__ = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , )
UpperCamelCase = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} )
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
datasets.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_UpperCAmelCase = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
_UpperCAmelCase = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_UpperCAmelCase = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_UpperCAmelCase = load_dataset('json' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_UpperCAmelCase = raw_datasets['train'].features['label'].names
_UpperCAmelCase = len(_UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_UpperCAmelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCAmelCase , )
_UpperCAmelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_UpperCAmelCase = {'Refused': 0, 'Entailed': 1}
_UpperCAmelCase = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_UpperCAmelCase : Union[str, Any] ):
# Tokenize the texts
def _convert_table_text_to_pandas(_UpperCAmelCase : List[Any] ):
_UpperCAmelCase = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
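        # For intuition, `table_text` is a '#'-delimited grid such as
        #   "city#population\nparis#2.1m\nberlin#3.6m"
        # which the helper above turns into a two-row DataFrame with columns
        # ['city', 'population'] (the values here are illustrative, not taken
        # from the TabFact data).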
_UpperCAmelCase = examples['statement']
_UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_UpperCAmelCase = tokenizer(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase )
_UpperCAmelCase = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_UpperCAmelCase = raw_datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_UpperCAmelCase = raw_datasets['train']
if data_args.max_train_samples is not None:
_UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_UpperCAmelCase = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_UpperCAmelCase = raw_datasets['test']
if data_args.max_predict_samples is not None:
_UpperCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_UpperCAmelCase : EvalPrediction ):
_UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions
_UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase = default_data_collator
elif training_args.fpaa:
_UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 )
else:
_UpperCAmelCase = None
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
_UpperCAmelCase = train_result.metrics
_UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase )
)
_UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _UpperCAmelCase )
trainer.save_metrics('train' , _UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase )
_UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase )
_UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics('eval' , _UpperCAmelCase )
trainer.save_metrics('eval' , _UpperCAmelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_UpperCAmelCase = predict_dataset.remove_columns('label' )
_UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ).predictions
_UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 )
_UpperCAmelCase = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = label_list[item]
writer.write(F"{index}\t{item}\n" )
_UpperCAmelCase = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCAmelCase )
else:
trainer.create_model_card(**_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
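# A hypothetical invocation (the script name and paths are placeholders; the
# flags come from the HfArgumentParser/TrainingArguments fields above):
#   python run_tabfact.py --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval --output_dir ./tapex-tabfact --overwrite_output_dir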
| 706
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
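# Example invocation (file names are placeholders; the flags are the required
# arguments declared by the parser above):
#   python convert_t5_checkpoint.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch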
| 639
| 0
|
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    '''Count how many rolls of `dice_number` dice produce each possible total.'''
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
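# Sanity check: with two 4-sided dice the totals 2..8 occur with frequencies
# 1, 2, 3, 4, 3, 2, 1, so total_frequency_distribution(4, 2)[5] == 4.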
def solution() -> float:
    '''Probability that Peter (nine 4-sided dice) out-rolls Colin (six 6-sided dice).'''
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 707
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639
| 0
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    '''Count the triangular words in words.txt (Project Euler 42).'''
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory , 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
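# Example: "SKY" scores 19 + 11 + 25 = 55, which is the 10th triangular number,
# so it counts as a triangular word.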
if __name__ == "__main__":
print(solution())
| 639
| 0
|
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class __lowerCAmelCase ( A ):
UpperCamelCase = 4_2
UpperCamelCase = None
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=0.999 , _UpperCAmelCase : Any="cosine" , ) -> Optional[int]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCAmelCase : str ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCAmelCase : Dict ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
_UpperCAmelCase = []
for i in range(_UpperCAmelCase ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCAmelCase ) / alpha_bar_fn(_UpperCAmelCase ) , _UpperCAmelCase ) )
return torch.tensor(_UpperCAmelCase , dtype=torch.floataa )
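# Each beta is 1 - alpha_bar(t2) / alpha_bar(t1) for consecutive normalised
# timesteps, clipped at the max beta (0.999 by default in the signature above),
# so the cumulative product of (1 - beta) tracks the chosen alpha_bar schedule.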
class __lowerCAmelCase ( A , A ):
UpperCamelCase = 1
@register_to_config
def __init__( self : Union[str, Any] , A : int = 10_00 , A : float = 0.0_0_0_1 , A : float = 0.0_2 , A : str = "linear" , A : Optional[Union[np.ndarray, List[float]]] = None , A : bool = True , A : bool = True , A : int = 0 , A : str = "epsilon" , A : float = 1.0 , **A : List[Any] , ) -> str:
"""simple docstring"""
if kwargs.get('set_alpha_to_one' , A) is not None:
_UpperCAmelCase = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' , '1.0.0' , A , standard_warn=A)
_UpperCAmelCase = kwargs['set_alpha_to_one']
if trained_betas is not None:
_UpperCAmelCase = torch.tensor(A , dtype=torch.floataa)
elif beta_schedule == "linear":
_UpperCAmelCase = torch.linspace(A , A , A , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , A , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase = betas_for_alpha_bar(A)
else:
raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}")
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0)
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_UpperCAmelCase = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = torch.from_numpy(np.arange(0 , A).copy().astype(np.intaa))
def _lowerCamelCase ( self : int , A : torch.FloatTensor , A : Optional[int] = None) -> torch.FloatTensor:
"""simple docstring"""
return sample
def _lowerCamelCase ( self : Optional[Any] , A : int , A : Union[str, torch.device] = None) -> List[Any]:
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
F" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
F" maximal {self.config.num_train_timesteps} timesteps.")
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase = (np.arange(0 , A) * step_ratio).round().copy().astype(np.intaa)
_UpperCAmelCase = torch.from_numpy(A).to(A)
self.timesteps += self.config.steps_offset
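        # Example: with num_train_timesteps=1000 and num_inference_steps=50 the
        # step ratio is 20, giving timesteps [0, 20, ..., 980] before
        # `steps_offset` is added.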
def _lowerCamelCase ( self : str , A : torch.FloatTensor , A : int , A : torch.FloatTensor , A : float = 0.0 , A : bool = False , A : Optional[torch.FloatTensor] = None , A : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
_UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_UpperCAmelCase = self.alphas_cumprod[timestep]
_UpperCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_UpperCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_UpperCAmelCase = model_output
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
_UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
' `v_prediction`')
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_UpperCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range)
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=A , pred_original_sample=A)
def __len__( self : int) -> Tuple:
"""simple docstring"""
return self.config.num_train_timesteps
| 709
|
def binary_xor(a: int, b: int) -> str:
    '''Return the bitwise XOR of two non-negative integers as a binary string.'''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary) , len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len) , b_binary.zfill(max_len)))
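# Example: 25 is 0b11001 and 32 is 0b100000, so binary_xor(25, 32) == '0b111001'.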
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
| 0
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
def __init__( self : Dict , *A : List[Any] , **A : List[Any]) -> None:
"""simple docstring"""
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , A , )
super().__init__(*A , **A)
| 710
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
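# Example: "Momo" normalises to "momo"; every character count is even, so it
# can be rearranged into the palindrome "ommo".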
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
| 0
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
UpperCAmelCase__ = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class __lowerCAmelCase :
UpperCamelCase = 4_2
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = _str_to_version_tuple(self.version_str)
def __repr__( self : List[Any]) -> int:
"""simple docstring"""
return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
return self.major, self.minor, self.patch
def _lowerCamelCase ( self : Optional[int] , A : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
if isinstance(A , A):
return Version(A)
elif isinstance(A , A):
return other
raise TypeError(F"{other} (type {type(A)}) cannot be compared to version.")
def __eq__( self : List[str] , A : Optional[int]) -> Tuple:
"""simple docstring"""
try:
_UpperCAmelCase = self._validate_operand(A)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : int , A : Optional[int]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self._validate_operand(A)
return self.tuple < other.tuple
def __hash__( self : Dict) -> int:
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def _lowerCamelCase ( cls : int , A : List[str]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
return self.version_str
def _str_to_version_tuple(version_str: str) -> tuple:
    '''Extract (major, minor, patch) from an "x.y.z" version string.'''
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(F"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
    return tuple(int(v) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def _version_tuple_to_str(version_tuple: tuple) -> str:
    '''Join a (major, minor, patch) tuple back into an "x.y.z" string.'''
    return ".".join(str(v) for v in version_tuple )
| 711
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
            '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset: Dataset, ref_file: str) -> Dataset:
    '''Attach whole-word-masking reference ids (one JSON line per example) to a dataset.'''
    with open(ref_file , 'r' , encoding='utf-8' ) as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict)
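# Each line of the ref file is a JSON-encoded list, one per training example;
# by assumption here it marks which sub-token positions continue a Chinese
# word (e.g. `[2, 3]`), as produced by the separate WWM reference preprocessing.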
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 639
| 0
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """test for str() conversion"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for method size()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for * operator (scalar and dot product)"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for Matrix str() conversion"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self) -> None:
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self) -> None:
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
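# A minimal, hedged usage sketch of the library under test (relies only on the `lib`
# names imported above; the values mirror the assertions in the tests):
#
#     v = Vector([1, 2, 3])
#     print(axpy(2, v, Vector([1, 0, 1])))              # "(3,4,7)" -> 2*v + y
#     m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
#     print(m * v)                                      # "(14,32,50)" matrix-vector product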
| 712
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False,
        bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>",
        cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # An XLNet sequence has the format ``X <sep> <cls>`` (pair: ``A <sep> B <sep> <cls>``)
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # The trailing <cls> token gets its own segment id (2)
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
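# Hedged illustration of the layouts produced by the two methods above: XLNet puts its
# special tokens at the END of the sequence, unlike BERT.
#     single:  x1 x2 ... xn <sep> <cls>                 type ids: 0 ... 0 0 2
#     pair:    a1 ... an <sep> b1 ... bm <sep> <cls>    type ids: 0 ... 0 0 1 ... 1 1 2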
| 639
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 0
|
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    # Extract the square window of side `kernel_size` centred on (x, y).
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Build the spatial Gaussian kernel from distances to the window centre.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # kernel size must be odd
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")

    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
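# Hedged OpenCV-free demo: the filter itself only needs NumPy, so it can be
# exercised on a synthetic gradient image.
#
#     demo = np.tile(np.linspace(0.0, 1.0, 32, dtype="float32"), (32, 1))
#     smoothed = bilateral_filter(demo, 1.0, 1.0, 5)
#     print(smoothed.shape)  # (32, 32); border pixels outside the kernel band stay 0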
| 714
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of a code example, ignoring all whitespace."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Compute the mean and max line length of a file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Compute the fraction of alphanumeric characters."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check whether the example's hash is still in the set of unique hashes; remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if a file is flagged as auto-generated in its first lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if a file is a configuration or test file."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: keyword in the first `scan_width` lines
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: count occurrences of "config"/"test" against a length-based threshold
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a file contains none of the keywords hinting at plain Python logic."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if a file uses the assignment operator fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
    if counter > minimum:
        return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of a file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one dictionary of results."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and uninformative files are dropped with a probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 639
| 0
|
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
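# Worked example (hedged): below 100 the even Fibonacci terms are 2, 8 and 34, so
# solution(100) == 44; with the default limit, solution() == 4_613_732 (Project Euler #2).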
if __name__ == "__main__":
print(f"""{solution() = }""")
| 715
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    """Map remaining original Jukebox key names onto the transformers naming scheme."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
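# Hedged mini-example of the fullmatch/sub renaming pattern used above (assumes `import re`):
#
#     pat = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
#     old = "encoders.0.level_blocks.1.model.2.1.weight"
#     g = pat.match(old).groups()          # ('0', '1', '2', '1', 'weight')
#     idx = int(g[2]) * 2 + int(g[3])      # flattens the nested module index -> 5
#     new = pat.sub(f"encoders.{g[0]}.level_blocks.{g[1]}.downsample_block.{idx}.{g[-1]}", old)
#     # new == "encoders.0.level_blocks.1.downsample_block.5.weight"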
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """
    Copy/paste/tweak the original model's weights to the transformers design.
    """
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 639
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i], dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection, dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(
        self, sample, timesteps, encoder_hidden_states, controlnet_cond,
        conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
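# Hedged usage sketch (class and shapes as defined above; an illustration under
# assumptions, not a verified API example):
#
#     model = FlaxControlNetModel(sample_size=32)
#     params = model.init_weights(jax.random.PRNGKey(0))
#     sample = jnp.zeros((1, 4, 32, 32))     # latent input
#     cond = jnp.zeros((1, 3, 256, 256))     # conditioning image, 8x the latent size
#     text = jnp.zeros((1, 1, 1280))         # encoder hidden states
#     out = model.apply({"params": params}, sample, jnp.ones((1,), jnp.int32), text, cond)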
| 716
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # five images, three near-uniform {score, label} entries each; label order is not guaranteed
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # five images, three near-uniform {score, label} entries each; label order is not guaranteed
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
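# Hedged usage sketch of the pipeline exercised by these tests (model id taken from
# the slow tests above):
#
#     from transformers import pipeline
#     clf = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     preds = clf("cats.png", candidate_labels=["cat", "plane", "remote"])
#     # -> [{"score": ..., "label": "remote"}, ...] sorted by descending score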
| 639
| 0
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original BLIP weights to the transformers design.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 717
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 0
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """
    Count the triangular words in words.txt: words whose alphabetical character sum
    (A=1, ..., Z=26) is a triangular number.
    """
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
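# Worked example (hedged): "SKY" -> 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the
# tenth triangular number, so "SKY" counts as a triangular word.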
if __name__ == "__main__":
print(solution())
| 718
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
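# Hedged sketch of wiring these callbacks into a pl.Trainer (paths/metric names assumed):
#
#     trainer = pl.Trainer(
#         callbacks=[
#             get_checkpoint_callback("outputs", "rouge2"),
#             get_early_stopping_callback("rouge2", patience=3),
#             Seq2SeqLoggingCallback(),
#         ],
#     )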
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 639
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def A ( _UpperCAmelCase : Dict=None ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(add_help=_UpperCAmelCase , allow_abbrev=_UpperCAmelCase )
# The main config parser
_UpperCAmelCase = config_command_parser(_UpperCAmelCase )
# The subparser to add commands to
_UpperCAmelCase = config_parser.add_subparsers(title='subcommands' , dest='subcommand' )
# Then add other parsers with the parent parser
default_command_parser(_UpperCAmelCase , parents=[parent_parser] )
update_command_parser(_UpperCAmelCase , parents=[parent_parser] )
return config_parser
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = get_config_parser()
_UpperCAmelCase = config_parser.parse_args()
if not hasattr(_UpperCAmelCase , 'func' ):
config_parser.print_help()
exit(1 )
# Run
args.func(_UpperCAmelCase )
if __name__ == "__main__":
main()
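# --- Added illustrative sketch ---
# The parent-parser pattern used above, reduced to plain argparse: a shared
# parser contributes common flags to every subcommand. All names here are
# illustrative, not taken from the original module.
import argparse

parent = argparse.ArgumentParser(add_help=False)
parent.add_argument('--config_file', default=None, help='flag shared by every subcommand')

parser = argparse.ArgumentParser(prog='config')
subparsers = parser.add_subparsers(title='subcommands', dest='subcommand')
subparsers.add_parser('default', parents=[parent])
subparsers.add_parser('update', parents=[parent])

args = parser.parse_args(['default', '--config_file', 'cfg.yaml'])
print(args.subcommand, args.config_file)  # -> default cfg.yaml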
| 719
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
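# --- Added illustrative sketch ---
# The character-level round trip these tests exercise, using a plain dict in
# place of MgpstrTokenizer (a simplification, not the real implementation):
vocab = {ch: i for i, ch in enumerate('0123456789abcdefghijklmnopqrstuvwxyz')}
inv_vocab = {i: ch for ch, i in vocab.items()}

text = 'tester'
ids = [vocab[ch] for ch in text.lower()]       # tokenize -> ids
decoded = ''.join(inv_vocab[i] for i in ids)   # ids -> text
assert decoded == text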
| 639
| 0
|
import fire
from utils import calculate_rouge, save_json
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , **_UpperCAmelCase : Any ) -> str:
'''simple docstring'''
_UpperCAmelCase = [x.strip() for x in open(_UpperCAmelCase ).readlines()]
_UpperCAmelCase = [x.strip() for x in open(_UpperCAmelCase ).readlines()][: len(_UpperCAmelCase )]
_UpperCAmelCase = calculate_rouge(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
if save_path is not None:
save_json(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
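# --- Added usage note ---
# fire.Fire exposes `calculate_rouge_path`'s signature as a CLI, so the
# intended invocation looks like the following (file names are hypothetical):
#
#   python run_eval.py predictions.txt references.txt --save_path=rouge.json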
| 720
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
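# --- Added illustrative sketch ---
# Loading the exported graph with onnxruntime and feeding the named inputs the
# export above declares. 'BART.onnx' matches the script's default output path;
# the beam/length values mirror the two defaults set in main(), and 2 is
# facebook/bart-base's decoder_start_token_id.
import numpy as np
import onnxruntime
from transformers import BartTokenizer

sess = onnxruntime.InferenceSession('BART.onnx')
tok = BartTokenizer.from_pretrained('facebook/bart-base')
enc = tok(['My friends are cool but they eat too many carbs.'], return_tensors='np')
out = sess.run(None, {
    'input_ids': enc['input_ids'],
    'attention_mask': enc['attention_mask'],
    'num_beams': np.array(4),
    'max_length': np.array(5),
    'decoder_start_token_id': np.array(2),
})
print(tok.batch_decode(out[0], skip_special_tokens=True))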
| 639
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''bloom'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {
'''num_hidden_layers''': '''n_layer''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : Dict , A : int=25_08_80 , A : Union[str, Any]=64 , A : Any=2 , A : Any=8 , A : List[str]=1E-5 , A : Optional[Any]=0.0_2 , A : List[Any]=True , A : Any=1 , A : Tuple=2 , A : Optional[int]=False , A : str=0.0 , A : Optional[int]=0.0 , A : str=1 , A : str=False , **A : List[Any] , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('n_embed' , A)
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = pretraining_tp
_UpperCAmelCase = apply_residual_connection_post_layernorm
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = slow_but_exact
super().__init__(bos_token_id=A , eos_token_id=A , **A)
class __lowerCAmelCase ( A ):
UpperCamelCase = version.parse('''1.12''' )
def __init__( self : int , A : PretrainedConfig , A : str = "default" , A : List[PatchingSpec] = None , A : bool = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(A , task=A , patching_specs=A , use_past=A)
if not getattr(self._config , 'pad_token_id' , A):
# TODO: how to do that better?
_UpperCAmelCase = 0
@property
def _lowerCamelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
_UpperCAmelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(A , direction='inputs' , inverted_values_shape=A)
_UpperCAmelCase = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def _lowerCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
return self._config.n_head
@property
def _lowerCamelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 1E-3
def _lowerCamelCase ( self : List[str] , A : "PreTrainedTokenizer" , A : int = -1 , A : int = -1 , A : bool = False , A : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_UpperCAmelCase = super(A , self).generate_dummy_inputs(
A , batch_size=A , seq_length=A , is_pair=A , framework=A)
# We need to order the inputs in the way they appear in the forward()
_UpperCAmelCase = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
_UpperCAmelCase , _UpperCAmelCase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_UpperCAmelCase = seqlen + 2
_UpperCAmelCase = self._config.hidden_size // self.num_attention_heads
_UpperCAmelCase = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_UpperCAmelCase = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_UpperCAmelCase = [
(torch.zeros(A), torch.zeros(A)) for _ in range(self.num_layers)
]
_UpperCAmelCase = common_inputs['attention_mask']
if self.use_past:
_UpperCAmelCase = ordered_inputs['attention_mask'].dtype
_UpperCAmelCase = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(A , A , dtype=A)] , dim=1)
return ordered_inputs
@property
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
return 13
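# --- Added worked example ---
# The past_key_values shapes constructed above, evaluated for a small
# hypothetical config (batch=2, n_head=8, hidden=64, seqlen=5). Note BLOOM's
# key layout keeps head_dim before the sequence axis, unlike the value layout.
import torch

batch, n_head, hidden, seqlen = 2, 8, 64, 5
head_dim = hidden // n_head                            # 8
past_length = seqlen + 2                               # mirrors `seqlen + 2` above
key_shape = (batch * n_head, head_dim, past_length)    # (16, 8, 7)
value_shape = (batch * n_head, past_length, head_dim)  # (16, 7, 8)
past = [(torch.zeros(key_shape), torch.zeros(value_shape)) for _ in range(3)]
print(past[0][0].shape, past[0][1].shape)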
| 721
|
def A ( _UpperCAmelCase : list ) -> list:
'''simple docstring'''
if len(_UpperCAmelCase ) <= 1:
return lst
_UpperCAmelCase = 1
while i < len(_UpperCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
_UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1]
i -= 1
if i == 0:
_UpperCAmelCase = 1
return lst
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
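# --- Added self-contained demonstration ---
# The gnome-sort step rule used above, restated with readable names: step back
# one position after every swap, otherwise step forward.
def gnome_sort_demo(lst: list) -> list:
    i = 1
    while i < len(lst):
        if i == 0 or lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
    return lst

assert gnome_sort_demo([3, 1, 2]) == [1, 2, 3]
assert gnome_sort_demo([]) == []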
| 639
| 0
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 700
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple="pt" ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = {'add_prefix_space': True} if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not line.startswith(' ' ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=_UpperCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=None , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(_UpperCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
return [len(A) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def A ( _UpperCAmelCase : List[List] ) -> Union[str, Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCAmelCase ) )
def A ( _UpperCAmelCase : str ) -> None:
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'git_log.json' ) )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=4 , **_UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase , **_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase ) as f:
return json.load(_UpperCAmelCase )
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = git.Repo(search_parent_directories=_UpperCAmelCase )
_UpperCAmelCase = {
'repo_id': str(_UpperCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def A ( _UpperCAmelCase : Callable , _UpperCAmelCase : Iterable ) -> List:
'''simple docstring'''
return list(map(_UpperCAmelCase , _UpperCAmelCase ) )
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'wb' ) as f:
return pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
def remove_articles(_UpperCAmelCase : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _UpperCAmelCase )
def white_space_fix(_UpperCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(_UpperCAmelCase : Tuple ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCAmelCase : str ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = normalize_answer(_UpperCAmelCase ).split()
_UpperCAmelCase = Counter(_UpperCAmelCase ) & Counter(_UpperCAmelCase )
_UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return normalize_answer(_UpperCAmelCase ) == normalize_answer(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = 0
for hypo, pred in zip(_UpperCAmelCase , _UpperCAmelCase ):
em += exact_match_score(_UpperCAmelCase , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
em /= len(_UpperCAmelCase )
return {"em": em}
def A ( _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return model_prefix.startswith('rag' )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = 'dropout_rate'
for p in extra_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if not hasattr(_UpperCAmelCase , _UpperCAmelCase ) and not hasattr(_UpperCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
continue
_UpperCAmelCase = p if hasattr(_UpperCAmelCase , _UpperCAmelCase ) else equivalent_param[p]
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
delattr(_UpperCAmelCase , _UpperCAmelCase )
return hparams, config
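# --- Added worked example ---
# The token-level F1 above by hand: count tokens shared between prediction and
# gold (after normalization), then take the harmonic mean of precision and
# recall. The token lists here are already article-free, as normalize_answer
# would leave them.
from collections import Counter

pred = 'cat sat mat'.split()
gold = 'cat sat floor'.split()
common = Counter(pred) & Counter(gold)   # {'cat': 1, 'sat': 1}
num_same = sum(common.values())          # 2
precision = num_same / len(pred)         # 2/3
recall = num_same / len(gold)            # 2/3
f1 = 2 * precision * recall / (precision + recall)
print(round(f1, 4))  # 0.6667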
| 639
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''timesformer'''
def __init__( self : Optional[Any] , A : str=2_24 , A : List[str]=16 , A : Any=3 , A : Optional[Any]=8 , A : Optional[Any]=7_68 , A : str=12 , A : int=12 , A : str=30_72 , A : Optional[Any]="gelu" , A : Tuple=0.0 , A : str=0.0 , A : Union[str, Any]=0.0_2 , A : List[Any]=1E-6 , A : Any=True , A : Tuple="divided_space_time" , A : Optional[int]=0 , **A : Tuple , ) -> str:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_frames
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = attention_type
_UpperCAmelCase = drop_path_rate
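# --- Added usage sketch ---
# Instantiating the config above like any PretrainedConfig subclass; the
# exported name TimesformerConfig is an assumption based on the library.
from transformers import TimesformerConfig

config = TimesformerConfig(num_frames=16, attention_type='divided_space_time')
print(config.hidden_size, config.num_frames)  # 768 16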
| 701
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
while second != 0:
_UpperCAmelCase = first & second
first ^= second
_UpperCAmelCase = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 639
| 0
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : List[str]=13 , A : List[str]=7 , A : List[Any]=True , A : List[Any]=True , A : int=True , A : Union[str, Any]=True , A : Dict=99 , A : int=16 , A : Optional[int]=36 , A : List[Any]=6 , A : int=6 , A : Optional[int]=6 , A : str=37 , A : int="gelu" , A : int=0.1 , A : List[Any]=0.1 , A : str=5_12 , A : str=16 , A : List[Any]=2 , A : Optional[int]=0.0_2 , A : Any=3 , A : List[Any]=4 , A : Optional[int]=None , ) -> str:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = embedding_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_hidden_groups
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def _lowerCamelCase ( self : Dict) -> Dict:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _lowerCamelCase ( self : Optional[int] , A : Optional[int] , A : str , A : int , A : Tuple , A : Union[str, Any] , A : List[Any] , A : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = AlbertModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A)
_UpperCAmelCase = model(A , token_type_ids=A)
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _lowerCamelCase ( self : List[Any] , A : List[str] , A : Any , A : Tuple , A : List[Any] , A : int , A : List[str] , A : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = AlbertForPreTraining(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(
A , attention_mask=A , token_type_ids=A , labels=A , sentence_order_label=A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))
def _lowerCamelCase ( self : Optional[Any] , A : Any , A : Any , A : Dict , A : Optional[Any] , A : int , A : int , A : Dict) -> Any:
"""simple docstring"""
_UpperCAmelCase = AlbertForMaskedLM(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : Tuple , A : List[str] , A : List[Any] , A : Tuple , A : Any , A : str , A : List[Any] , A : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = AlbertForQuestionAnswering(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : List[Any] , A : List[Any] , A : List[str] , A : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : Any , A : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = AlbertForSequenceClassification(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Optional[Any] , A : Optional[Any] , A : Optional[Any] , A : int , A : Tuple , A : Any , A : Union[str, Any] , A : List[str]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = AlbertForTokenClassification(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , attention_mask=A , token_type_ids=A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : Union[str, Any] , A : List[Any] , A : Optional[int] , A : Union[str, Any] , A : Optional[int] , A : Tuple , A : List[str] , A : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = AlbertForMultipleChoice(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCAmelCase = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCamelCase ( self : str) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
def _lowerCamelCase ( self : Optional[Any] , A : Optional[Any] , A : Optional[int] , A : Tuple=False) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = super()._prepare_for_class(A , A , return_labels=A)
if return_labels:
if model_class in get_values(A):
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A)
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A)
return inputs_dict
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = AlbertModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , hidden_size=37)
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : List[str]) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A)
def _lowerCamelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A)
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A)
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A)
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A)
def _lowerCamelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*A)
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = AlbertModel.from_pretrained(A)
self.assertIsNotNone(A)
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Any) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = AlbertModel.from_pretrained('albert-base-v2')
_UpperCAmelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
_UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_UpperCAmelCase = model(A , attention_mask=A)[0]
_UpperCAmelCase = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , A)
_UpperCAmelCase = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4))
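# --- Added pattern note ---
# The integration test above pins a small logits slice against hard-coded
# reference values; the bare pattern, shown on dummy tensors, is:
import torch

output = torch.zeros(1, 11, 768)
expected_slice = torch.zeros(1, 3, 3)
assert torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)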
| 702
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A ( _UpperCAmelCase : str , _UpperCAmelCase : complex , _UpperCAmelCase : str = "x" , _UpperCAmelCase : float = 10**-10 , _UpperCAmelCase : int = 1 , ) -> complex:
'''simple docstring'''
_UpperCAmelCase = symbols(_UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) )
_UpperCAmelCase = starting_point
while True:
if diff_function(_UpperCAmelCase ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function(
_UpperCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 639
| 0
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''input_values''', '''padding_mask''']
def __init__( self : Any , A : int = 1 , A : int = 2_40_00 , A : float = 0.0 , A : float = None , A : float = None , **A : Dict , ) -> Dict:
"""simple docstring"""
super().__init__(feature_size=A , sampling_rate=A , padding_value=A , **A)
_UpperCAmelCase = chunk_length_s
_UpperCAmelCase = overlap
@property
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
def __call__( self : Tuple , A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A : Optional[Union[bool, str, PaddingStrategy]] = None , A : Optional[bool] = False , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , A : Optional[int] = None , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.')
elif padding is None:
# by default let's pad the inputs
_UpperCAmelCase = True
_UpperCAmelCase = bool(
isinstance(A , (list, tuple)) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list))))
if is_batched:
_UpperCAmelCase = [np.asarray(A , dtype=np.floataa).T for audio in raw_audio]
elif not is_batched and not isinstance(A , np.ndarray):
_UpperCAmelCase = np.asarray(A , dtype=np.floataa)
elif isinstance(A , np.ndarray) and raw_audio.dtype is np.dtype(np.floataa):
_UpperCAmelCase = raw_audio.astype(np.floataa)
# always return batch
if not is_batched:
_UpperCAmelCase = [np.asarray(A).T]
# verify inputs are valid
for idx, example in enumerate(A):
if example.ndim > 2:
raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}")
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels")
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels")
_UpperCAmelCase = None
_UpperCAmelCase = BatchFeature({'input_values': raw_audio})
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_UpperCAmelCase = min(array.shape[0] for array in raw_audio)
_UpperCAmelCase = int(np.floor(max_length / self.chunk_stride))
_UpperCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_UpperCAmelCase = max(array.shape[0] for array in raw_audio)
_UpperCAmelCase = int(np.ceil(max_length / self.chunk_stride))
_UpperCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
_UpperCAmelCase = 'max_length'
else:
_UpperCAmelCase = input_values
# normal padding on batch
if padded_inputs is None:
_UpperCAmelCase = self.pad(
A , max_length=A , truncation=A , padding=A , return_attention_mask=A , )
if padding:
_UpperCAmelCase = padded_inputs.pop('attention_mask')
_UpperCAmelCase = []
for example in padded_inputs.pop('input_values'):
if self.feature_size == 1:
_UpperCAmelCase = example[..., None]
input_values.append(example.T)
_UpperCAmelCase = input_values
if return_tensors is not None:
_UpperCAmelCase = padded_inputs.convert_to_tensors(A)
return padded_inputs
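# --- Added usage sketch ---
# Feeding mono float32 audio through an EnCodec-style feature extractor such
# as the one above (the exported name EncodecFeatureExtractor is an assumption
# based on the library; shapes assume the feature_size=1 default).
import numpy as np
from transformers import EncodecFeatureExtractor

fe = EncodecFeatureExtractor(sampling_rate=24_000)
audio = np.zeros(24_000, dtype=np.float32)  # one second of silence
inputs = fe(audio, sampling_rate=24_000, return_tensors='pt')
print(inputs['input_values'].shape)  # torch.Size([1, 1, 24000])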
| 703
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
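# --- Added usage sketch (fully hypothetical names) ---
# End-to-end preprocessing with a processor like the one above: resize to
# shortest_edge=256, center-crop to 224x224, rescale to [0, 1], normalize.
#
#   import numpy as np
#
#   processor = SomeImageProcessor()  # the anonymized class defined above
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors='np')
#   print(batch['pixel_values'].shape)  # (1, 3, 224, 224)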
| 639
| 0
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MvpTokenizer
UpperCamelCase = MvpTokenizerFast
UpperCamelCase = True
UpperCamelCase = filter_roberta_detectors
def _lowerCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
_UpperCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_UpperCAmelCase = {'unk_token': '<unk>'}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(A))
def _lowerCamelCase ( self : Optional[Any] , **A : Optional[int]) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[Any] , **A : Union[str, Any]) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : Any , A : str) -> Dict:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
return MvpTokenizer.from_pretrained('RUCAIBox/mvp')
@cached_property
def _lowerCamelCase ( self : Any) -> List[Any]:
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp')
@require_torch
def _lowerCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_UpperCAmelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase = tokenizer(A , max_length=len(A) , padding=A , return_tensors='pt')
self.assertIsInstance(A , A)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(A , A)
# Test that special tokens are reset
@require_torch
def _lowerCamelCase ( self : Dict) -> str:
"""simple docstring"""
_UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase = tokenizer(A , padding=A , return_tensors='pt')
# check if input_ids are returned and no labels
self.assertIn('input_ids' , A)
self.assertIn('attention_mask' , A)
self.assertNotIn('labels' , A)
self.assertNotIn('decoder_attention_mask' , A)
@require_torch
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase = tokenizer(text_target=A , max_length=32 , padding='max_length' , return_tensors='pt')
self.assertEqual(32 , targets['input_ids'].shape[1])
@require_torch
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase = tokenizer(
['I am a small frog' * 10_24, 'I am a small frog'] , padding=A , truncation=A , return_tensors='pt')
self.assertIsInstance(A , A)
self.assertEqual(batch.input_ids.shape , (2, 10_24))
@require_torch
def _lowerCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = ['A long paragraph for summarization.']
_UpperCAmelCase = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase = tokenizer(A , text_target=A , return_tensors='pt')
_UpperCAmelCase = inputs['input_ids']
_UpperCAmelCase = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
pass
def _lowerCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = self.tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = 'A, <mask> AllenNLP sentence.'
_UpperCAmelCase = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A)
_UpperCAmelCase = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids']) , sum(tokens_p['token_type_ids']))
# attention_mask should put 1 everywhere, so the sum divided by the length should be 1
self.assertEqual(
sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']) , sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']) , )
_UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
_UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
# Both backends should handle the space before the mask identically (historically the Python tokenizer didn't)
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
self.assertSequenceEqual(
A , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
self.assertSequenceEqual(
A , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [0]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
_UpperCAmelCase = [60]
_UpperCAmelCase = [10]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 3
_UpperCAmelCase = [1, 2, 3]
_UpperCAmelCase = [3, 2, 1]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 5)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 50
_UpperCAmelCase = [60, 1_00, 1_20]
_UpperCAmelCase = [10, 20, 30]
_UpperCAmelCase = len(A)
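# Hypothetical walkthrough (not part of the original test): with capacity 50,
# taking the items of value 100 (weight 20) and 120 (weight 30) fills the
# knapsack exactly, so the optimal total is 100 + 120 = 220.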
self.assertEqual(k.knapsack(A , A , A , A) , 2_20)
if __name__ == "__main__":
unittest.main()
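# The `knapsack.knapsack` helper exercised above is not shown in this file; a
# minimal recursive 0/1-knapsack sketch with the same
# (capacity, weights, values, counter) argument order might look like the
# following (an assumption for illustration, not the implementation under test):
def knapsack_sketch(capacity: int, weights: list, values: list, counter: int) -> int:
    # No items left or no capacity left: nothing more can be packed.
    if counter == 0 or capacity == 0:
        return 0
    # The current item does not fit: skip it.
    if weights[counter - 1] > capacity:
        return knapsack_sketch(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the current item.
    return max(
        values[counter - 1] + knapsack_sketch(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack_sketch(capacity, weights, values, counter - 1),
    )
# Example: knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3) == 220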
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCAmelCase__ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCAmelCase__ = "sshleifer/student_marian_en_ro_6_1"
UpperCAmelCase__ = "sshleifer/tiny-mbart"
@require_torch
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : Optional[int] , A : Dict=False , A : str=None , A : Dict=True , A : Tuple=True , A : Tuple=True , A : List[Any]=True , ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=A , num_train_epochs=1 , distributed=A , extra_args_str=A , predict_with_generate=A , do_train=A , do_eval=A , do_predict=A , )
_UpperCAmelCase = TrainerState.load_from_json(os.path.join(A , 'trainer_state.json')).log_history
if not do_eval:
return
_UpperCAmelCase = [log for log in logs if 'eval_loss' in log.keys()]
_UpperCAmelCase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_UpperCAmelCase = eval_metrics[-1]
assert isinstance(last_step_stats['eval_bleu'] , A)
assert not math.isnan(float(last_step_stats['eval_loss'])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=A)
@require_torch_multi_gpu
def _lowerCamelCase ( self : Dict) -> str:
"""simple docstring"""
self.run_seqaseq_quick(distributed=A)
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def _lowerCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=A , extra_args_str='--sharded_ddp simple')
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def _lowerCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=A , extra_args_str='--sharded_ddp simple --fp16')
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def _lowerCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
self.run_seqaseq_quick(distributed=A , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=A)
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
self.run_seqaseq_quick(
distributed=A , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=A)
@require_apex
@require_torch_gpu
def _lowerCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=A , extra_args_str='--fp16 --fp16_backend=apex')
# test a 2nd time - was previously getting 'eval_loss': nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A , extra_args_str='--fp16 --fp16_backend=apex')
@parameterized.expand(['base', 'low', 'high', 'mixed'])
@require_torch_multi_gpu
def _lowerCamelCase ( self : Optional[Any] , A : Optional[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
_UpperCAmelCase = experiments[experiment_id]
_UpperCAmelCase = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
_UpperCAmelCase = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A , extra_args_str=data['extra_args_str'])
_UpperCAmelCase = len(re.findall(A , cl.err))
self.assertEqual(A , data['n_matches'])
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=A , learning_rate=3E-4 , num_train_epochs=10 , distributed=A , )
# Check metrics
_UpperCAmelCase = TrainerState.load_from_json(os.path.join(A , 'trainer_state.json')).log_history
_UpperCAmelCase = [log for log in logs if 'eval_loss' in log.keys()]
_UpperCAmelCase = eval_metrics[0]
_UpperCAmelCase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['eval_bleu'] , A)
# test if do_predict saves generations and metrics
_UpperCAmelCase = os.listdir(A)
_UpperCAmelCase = {os.path.basename(p) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def _lowerCamelCase ( self : Any) -> List[str]:
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A : str) -> Tuple[int, float]:
_UpperCAmelCase = '--skip_memory_metrics 0'
_UpperCAmelCase = self.run_trainer(
max_len=1_28 , model_name=A , learning_rate=3E-4 , num_train_epochs=1 , optim=A , distributed=A , extra_args_str=A , do_eval=A , do_predict=A , n_gpus_to_use=1 , )
# Check metrics
_UpperCAmelCase = TrainerState.load_from_json(Path(A , 'trainer_state.json')).log_history
_UpperCAmelCase = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20)
_UpperCAmelCase = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20)
_UpperCAmelCase = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
_UpperCAmelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_UpperCAmelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig
_UpperCAmelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_UpperCAmelCase = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding`
# weights that don't get quantized and remain in fp32. Therefore only 25M parameters are
# quantized to 2 bytes, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
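#   (the 8 vs 2 bytes come from Adam's two optimizer states per parameter:
#    two fp32 moments = 8 bytes vs two int8 moments under bnb = 2 bytes)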
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
_UpperCAmelCase = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A , A , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
A , A , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
A , A , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def _lowerCamelCase ( self : Union[str, Any] , A : int , A : str , A : int , A : float = 3E-3 , A : str = "adafactor" , A : bool = False , A : str = None , A : int = 0 , A : bool = True , A : bool = True , A : bool = True , A : bool = True , A : int = None , ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
_UpperCAmelCase = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A)}\n ".split()
_UpperCAmelCase = '\n --do_predict\n '.split()
_UpperCAmelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_UpperCAmelCase = get_gpu_count()
_UpperCAmelCase = get_torch_dist_unique_port()
_UpperCAmelCase = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
_UpperCAmelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A , env=self.get_env())
else:
_UpperCAmelCase = ['run_translation.py'] + args
with patch.object(A , 'argv' , A):
main()
return output_dir
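# Note: distributed runs shell out to `python -m torch.distributed.run ...
# run_translation.py <args>`, while single-process runs patch `sys.argv` and
# call `run_translation.main()` in-process, as implemented above.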
import qiskit
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_UpperCAmelCase = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
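# A minimal usage sketch (assumptions, since the names above are mangled: the
# class is an image processor whose long entry point plays the role of
# `preprocess`, and `image` is a PIL image or HxWx3 uint8 array):
#
#   processor = ImageProcessorClass()      # defaults: shortest_edge=256, 224x224 crop
#   batch = processor.preprocess(image, return_tensors='np')
#   batch['pixel_values'].shape            # -> (1, 3, 224, 224)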
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
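# Example invocation (script name and paths are illustrative placeholders):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump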
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
_UpperCAmelCase = BigBirdConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
if is_trivia_qa:
_UpperCAmelCase = BigBirdForQuestionAnswering(_UpperCAmelCase )
else:
_UpperCAmelCase = BigBirdForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(_UpperCAmelCase , _UpperCAmelCase , is_trivia_qa=_UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
UpperCAmelCase__ = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )
return sd
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=rename_keys_prefix ) -> str:
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_UpperCAmelCase = key
for name_pair in rename_keys_prefix:
_UpperCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
_UpperCAmelCase = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`; it is added separately here
_UpperCAmelCase = new_d['cls.predictions.bias']
return new_d
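# Example of the rename pass above: with the ('bert.bert', 'visual_bert')
# prefix pair, a checkpoint key such as
#   'bert.bert.encoder.layer.0.attention.self.query.weight'
# becomes
#   'visual_bert.encoder.layer.0.attention.self.query.weight'.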
@torch.no_grad()
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
_UpperCAmelCase = 'pretraining'
if "vcr" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 2_048}
elif "vqa" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 2_048}
elif "nlvr" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 1_024}
else:
raise NotImplementedError(F"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 512}
_UpperCAmelCase = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 2_048}
_UpperCAmelCase = 'vqa_advanced'
elif "vqa" in checkpoint_path:
_UpperCAmelCase = {'visual_embedding_dim': 2_048, 'num_labels': 3_129}
_UpperCAmelCase = 'vqa'
elif "nlvr" in checkpoint_path:
_UpperCAmelCase = {
'visual_embedding_dim': 1_024,
'num_labels': 2,
}
_UpperCAmelCase = 'nlvr'
_UpperCAmelCase = VisualBertConfig(**_UpperCAmelCase )
# Load State Dict
_UpperCAmelCase = load_state_dict(_UpperCAmelCase )
_UpperCAmelCase = get_new_dict(_UpperCAmelCase , _UpperCAmelCase )
if model_type == "pretraining":
_UpperCAmelCase = VisualBertForPreTraining(_UpperCAmelCase )
elif model_type == "vqa":
_UpperCAmelCase = VisualBertForQuestionAnswering(_UpperCAmelCase )
elif model_type == "nlvr":
_UpperCAmelCase = VisualBertForVisualReasoning(_UpperCAmelCase )
elif model_type == "multichoice":
_UpperCAmelCase = VisualBertForMultipleChoice(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
# Save Checkpoints
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(__file__ ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
_UpperCAmelCase = ''
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
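# A word's value is the sum of its letters' alphabet positions ('A' -> 1,
# hence ord(letter) - 64); e.g. "SKY" -> 19 + 11 + 25 = 55 = t(10), which is
# triangular, so "SKY" counts.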
_UpperCAmelCase = [
word
for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(solution())
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def A ( _UpperCAmelCase : int = 3 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
if isinstance(_UpperCAmelCase , str ):
raise TypeError('number of qubits must be an integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(_UpperCAmelCase ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
raise ValueError('number of qubits too large to simulate (>10).' )
_UpperCAmelCase = QuantumRegister(_UpperCAmelCase , 'qr' )
_UpperCAmelCase = ClassicalRegister(_UpperCAmelCase , 'cr' )
_UpperCAmelCase = QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = number_of_qubits
for i in range(_UpperCAmelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_UpperCAmelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(k , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_UpperCAmelCase , _UpperCAmelCase )
# simulate with 10000 shots
_UpperCAmelCase = Aer.get_backend('qasm_simulator' )
_UpperCAmelCase = execute(_UpperCAmelCase , _UpperCAmelCase , shots=10_000 )
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
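# The QFT of |00...0> is a uniform superposition, so for n qubits the counts
# should be spread roughly evenly over all 2**n basis states (about 1_250
# shots per state for n = 3 with 10_000 shots).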
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
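# Worked example for the function above: a=25, b=32 -> bin strings '11001' and
# '100000' are zero-filled to width 6 ('011001' vs '100000'), giving
# '0b111001' (i.e. 57).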
import numpy as np
def A ( _UpperCAmelCase : np.array ) -> np.array:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def A ( _UpperCAmelCase : np.array ) -> np.array:
'''simple docstring'''
return vector * sigmoid(1.702 * vector )
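# The second function above is the sigmoid ("quick") approximation of GELU:
# gelu(x) ~ x * sigmoid(1.702 * x), per Hendrycks & Gimpel (2016).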
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
def A ( _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if not head:
return True
# split the list to two parts
_UpperCAmelCase , _UpperCAmelCase = head.next, head
while fast and fast.next:
_UpperCAmelCase = fast.next.next
_UpperCAmelCase = slow.next
_UpperCAmelCase = slow.next
_UpperCAmelCase = None # cut the first half off here (omitting this still works, but keeps the halves linked)
# reverse the second part
_UpperCAmelCase = None
while second:
_UpperCAmelCase = second.next
_UpperCAmelCase = node
_UpperCAmelCase = second
_UpperCAmelCase = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
_UpperCAmelCase = node.next
_UpperCAmelCase = head.next
return True
def A ( _UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
_UpperCAmelCase = _UpperCAmelCase = _UpperCAmelCase = head
while fast and fast.next:
_UpperCAmelCase , _UpperCAmelCase = fast.next.next, slow.next
# 2. Push the second half into the stack
_UpperCAmelCase = [slow.val]
while slow.next:
_UpperCAmelCase = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
_UpperCAmelCase = cur.next
return True
def A ( _UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
if not head or not head.next:
return True
_UpperCAmelCase = {}
_UpperCAmelCase = 0
while head:
if head.val in d:
d[head.val].append(pos )
else:
_UpperCAmelCase = [pos]
_UpperCAmelCase = head.next
pos += 1
_UpperCAmelCase = pos - 1
_UpperCAmelCase = 0
for v in d.values():
if len(v ) % 2 != 0:
middle += 1
else:
_UpperCAmelCase = 0
for i in range(0 , len(v ) ):
if v[i] + v[len(v ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
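# The three checkers above assume singly linked nodes exposing `.val` and
# `.next`; no node class is defined in this file, so the following is a
# minimal sketch (an assumption for illustration only):
class _NodeSketch:
    def __init__(self, val):
        self.val = val
        self.next = None

def _build_list(values):
    """Build a singly linked list from a Python list and return its head."""
    head = None
    for value in reversed(values):
        node = _NodeSketch(value)
        node.next = head
        head = node
    return head

# Any of the checkers above applied to _build_list([1, 2, 2, 1]) should return
# True, and applied to _build_list([1, 2, 3]) should return False.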
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = [json.loads(_UpperCAmelCase ) for line in f.read().splitlines() if (len(_UpperCAmelCase ) > 0 and not line.isspace())]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
_UpperCAmelCase = refs
return Dataset.from_dict(_UpperCAmelCase )
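# Each line of the ref file is expected to hold a JSON-encoded list of token
# positions that continue a whole word (the `chinese_ref` column consumed by
# DataCollatorForWholeWordMask); this description is an assumption based on
# the whole-word-masking setup, not something stated in this file.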
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, we need to prevent the trainer from removing those columns
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'])
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in sorted(results.items()):
                    logger.info(F"  {key} = {value}")
                    writer.write(F"{key} = {value}\n")
    return results
def _mp_fn(index):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
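# --- Hedged example (added for illustration; not part of the original script) ---
# The evaluation block above reports perplexity as exp(mean cross-entropy loss).
# A minimal, self-contained version of that computation; the loss value is a
# hypothetical stand-in for trainer.evaluate()["eval_loss"]:
import math

hypothetical_eval_loss = 2.0
perplexity = math.exp(hypothetical_eval_loss)
print(f"perplexity = {perplexity:.2f}")  # -> 7.39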
| 639
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
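# --- Hedged sketch (illustration only; not part of the module above) ---
# `_LazyModule` defers the heavy framework imports until an attribute is first
# accessed. A minimal stand-alone version of the same idea, with invented
# names and standard-library modules standing in for the real submodules:
import importlib
import types


class LazyModule(types.ModuleType):
    """Imports a submodule only when one of its attributes is requested."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so the real import happens only once
        return value


lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.sqrt(16.0))  # `math` is only imported at this point -> 4.0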
| 712
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs, ) -> None:
        """simple docstring"""
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
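# --- Hedged example (illustration only; not part of the class above) ---
# XLNet appends its special tokens instead of prepending them: a single
# sequence becomes `tokens + <sep> + <cls>`, a pair becomes
# `A + <sep> + B + <sep> + <cls>`. A tiny sanity check with placeholder ids
# (4 and 3 are made up, not the real vocabulary ids):
def xlnet_layout(token_ids_0, token_ids_1=None, sep_id=4, cls_id=3):
    sep, cls = [sep_id], [cls_id]
    if token_ids_1 is None:
        return token_ids_0 + sep + cls
    return token_ids_0 + sep + token_ids_1 + sep + cls


assert xlnet_layout([10, 11]) == [10, 11, 4, 3]
assert xlnet_layout([10], [20]) == [10, 4, 20, 4, 3]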
| 639
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
    def setUp(self) -> None:
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding='utf-8', check=True, )
        assert hasattr(self, 'env')
    def create_estimator(self, instance_count):
        """simple docstring"""
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        smp_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"{self.env.base_job_name}-{instance_count}-smp-{name_extension}", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version='py36', )
    def save_results_as_csv(self, job_name: str) -> None:
        """simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        """simple docstring"""
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job; this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump test results into a json file to share in the PR
        with open(F"{estimator.latest_training_job.name}.json", 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
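# --- Hedged sketch (illustration only; assumes pandas is installed) ---
# The assertions above read metric rows from the TrainingJobAnalytics
# dataframe. The same filtering, reproduced locally on a toy dataframe with
# the same column names:
import pandas as pd

df = pd.DataFrame({"metric_name": ["eval_accuracy", "eval_loss"], "value": [0.4, 1.1]})
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(df[df.metric_name == "eval_loss"]["value"])
assert all(a >= 0.3 for a in eval_accuracy) and all(l <= 1.2 for l in eval_loss)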
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 0
|
UpperCAmelCase__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
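# --- Hedged usage example (illustration only; assumes the `datasets` package) ---
# The names re-exported above describe dataset schemas, e.g.:
from datasets import ClassLabel, Features, Value

schema = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
print(schema)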
| 714
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")
def get_hash(example):
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN, '', example['content']).encode('utf-8')).hexdigest()}
def line_stats(example):
    '''simple docstring'''
    line_lengths = [len(line) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example['content']])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    '''simple docstring'''
    if example["hash"] in uniques:
        uniques.remove(example['hash'])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    '''simple docstring'''
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    '''simple docstring'''
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n')
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count('config')
        count_test += line.lower().count('test')
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    '''simple docstring'''
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    '''simple docstring'''
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    '''simple docstring'''
    input_ids = tokenizer(example['content'], truncation=False)['input_ids']
    ratio = len(example['content']) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    '''simple docstring'''
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    '''simple docstring'''
    with open(file_path, 'rb') as f_in:
        with gzip.open(str(file_path) + '.gz', 'wb', compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 639
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    '''simple docstring'''
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias', '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight', '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias', '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight', '.conv1d_2.weight')
    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0', 'conditioner_blocks')
    if "prime_prior" in key:
        key = key.replace('prime_prior', 'encoder')
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.', '.')
    if key.endswith('k'):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k', '.codebook')
    if "y_emb." in key:
        return key.replace('y_emb.', 'metadata_embedding.')
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb', 'embed_tokens')
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln', 'encoder.final_layer_norm')
    if ".ln" in key:
        return key.replace('.ln', '.layer_norm')
    if "_ln" in key:
        return key.replace('_ln', '_layer_norm')
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj', 'encoder.proj_in')
    if "prime_x_out" in key:
        return key.replace('prime_x_out', 'encoder.lm_head')
    if "prior.x_out" in key:
        return key.replace('x_out', 'fc_proj_out')
    if "x_emb" in key:
        return key.replace('x_emb', 'embed_tokens')
    return key
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
import re
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_conv_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_conv_in.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_encoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_proj_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_proj_out.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_decoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_prior_cond_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# keep original key
else:
_UpperCAmelCase = original_key
_UpperCAmelCase = replace_key(_UpperCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle missmatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_UpperCAmelCase = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_UpperCAmelCase = original_key
_UpperCAmelCase = original_key
_UpperCAmelCase = value
return new_dict
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
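# --- Hedged sketch (illustration only) ---
# The conversion above rewrites checkpoint keys with regexes, collapsing a
# (layer, sub-layer) pair into a single block index as 2 * layer + sub-layer.
# A reduced version of that pattern (regex tightened for the example):
import re

RE_CONV_IN = re.compile(r"encoders\.(\d+)\.level_blocks\.(\d+)\.model\.(\d+)\.(\d)\.(bias|weight)")


def rename(key):
    match = RE_CONV_IN.fullmatch(key)
    if match is None:
        return key  # keep original key
    enc, lvl, layer, sub, kind = match.groups()
    block_index = int(layer) * 2 + int(sub)
    return f"encoders.{enc}.level_blocks.{lvl}.downsample_block.{block_index}.{kind}"


assert rename("encoders.0.level_blocks.1.model.2.1.weight") == (
    "encoders.0.level_blocks.1.downsample_block.5.weight"
)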
| 639
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UpperCAmelCase__ = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
UpperCAmelCase__ = "UperNetConfig"
class __lowerCAmelCase ( nn.Module ):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], padding: Union[int, Tuple[int, int], str] = 0, bias: bool = False, dilation: Union[int, Tuple[int, int]] = 1, ) -> None:
        """simple docstring"""
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation, )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()
def _lowerCamelCase ( self : Union[str, Any] , A : torch.Tensor) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase = self.conv(A)
_UpperCAmelCase = self.batch_norm(A)
_UpperCAmelCase = self.activation(A)
return output
class __lowerCAmelCase ( nn.Module ):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        """simple docstring"""
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)
def _lowerCamelCase ( self : Any , A : torch.Tensor) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase = input
for layer in self.layers:
_UpperCAmelCase = layer(A)
return hidden_state
class __lowerCAmelCase ( nn.Module ):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        """simple docstring"""
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)
def _lowerCamelCase ( self : str , A : torch.Tensor) -> List[torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = []
for ppm in self.blocks:
_UpperCAmelCase = ppm(A)
_UpperCAmelCase = nn.functional.interpolate(
A , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners)
ppm_outs.append(A)
return ppm_outs
class __lowerCAmelCase ( nn.Module ):
    def __init__(self, config, in_channels):
        """simple docstring"""
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1, )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1, )
    def init_weights(self) -> None:
        """simple docstring"""
        self.apply(self._init_weights)

    def _init_weights(self, module) -> None:
        """simple docstring"""
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
def _lowerCamelCase ( self : Tuple , A : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = inputs[-1]
_UpperCAmelCase = [x]
psp_outs.extend(self.psp_modules(A))
_UpperCAmelCase = torch.cat(A , dim=1)
_UpperCAmelCase = self.bottleneck(A)
return output
def _lowerCamelCase ( self : List[str] , A : torch.Tensor) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(A))
# build top-down path
_UpperCAmelCase = len(A)
for i in range(used_backbone_levels - 1 , 0 , -1):
_UpperCAmelCase = laterals[i - 1].shape[2:]
_UpperCAmelCase = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=A , mode='bilinear' , align_corners=self.align_corners)
# build outputs
_UpperCAmelCase = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1 , 0 , -1):
_UpperCAmelCase = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners)
_UpperCAmelCase = torch.cat(A , dim=1)
_UpperCAmelCase = self.fpn_bottleneck(A)
_UpperCAmelCase = self.classifier(A)
return output
class __lowerCAmelCase ( nn.Module ):
    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        """simple docstring"""
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2)
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
    def init_weights(self) -> None:
        """simple docstring"""
        self.apply(self._init_weights)

    def _init_weights(self, module) -> None:
        """simple docstring"""
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
def _lowerCamelCase ( self : Tuple , A : torch.Tensor) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase = encoder_hidden_states[self.in_index]
_UpperCAmelCase = self.convs(A)
if self.concat_input:
_UpperCAmelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1))
_UpperCAmelCase = self.classifier(A)
return output
class __lowerCAmelCase ( A ):
UpperCamelCase = UperNetConfig
UpperCamelCase = '''pixel_values'''
UpperCamelCase = True
def _lowerCamelCase ( self : Any , A : str) -> Dict:
"""simple docstring"""
if isinstance(A , A):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _lowerCamelCase ( self : str , A : str , A : int=False) -> Dict:
"""simple docstring"""
if isinstance(A , A):
_UpperCAmelCase = value
UpperCAmelCase__ = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.''' , A , )
class __lowerCAmelCase ( A ):
def __init__( self : Dict , A : Optional[int]) -> int:
"""simple docstring"""
super().__init__(A)
_UpperCAmelCase = AutoBackbone.from_config(config.backbone_config)
# Semantic segmentation head(s)
_UpperCAmelCase = UperNetHead(A , in_channels=self.backbone.channels)
_UpperCAmelCase = UperNetFCNHead(A) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@replace_return_docstrings(output_type=A , config_class=_CONFIG_FOR_DOC)
def _lowerCamelCase ( self : List[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
"""simple docstring"""
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
_UpperCAmelCase = self.backbone.forward_with_filtered_kwargs(
A , output_hidden_states=A , output_attentions=A)
_UpperCAmelCase = outputs.feature_maps
_UpperCAmelCase = self.decode_head(A)
_UpperCAmelCase = nn.functional.interpolate(A , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = None
if self.auxiliary_head is not None:
_UpperCAmelCase = self.auxiliary_head(A)
_UpperCAmelCase = nn.functional.interpolate(
A , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one')
else:
# compute weighted loss
_UpperCAmelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
_UpperCAmelCase = loss_fct(A , A)
_UpperCAmelCase = loss_fct(A , A)
_UpperCAmelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
_UpperCAmelCase = (logits,) + outputs[1:]
else:
_UpperCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=A , logits=A , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
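# --- Hedged sketch (illustration only; assumes torch is installed) ---
# Both heads above upsample their coarse logits back to the input resolution
# with bilinear interpolation before the loss is computed. In miniature:
import torch
from torch import nn

logits = torch.randn(1, 150, 32, 32)  # (batch, num_labels, height/16, width/16)
upsampled = nn.functional.interpolate(logits, size=(512, 512), mode="bilinear", align_corners=False)
print(upsampled.shape)  # torch.Size([1, 150, 512, 512])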
| 716
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 639
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __lowerCAmelCase ( A ):
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
class __lowerCAmelCase ( A ):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class __lowerCAmelCase ( A ):
UpperCamelCase = [R'''pooler''', R'''logit_scale''']
UpperCamelCase = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCamelCase = '''roberta'''
UpperCamelCase = RobertaSeriesConfig
def __init__( self : Any , A : Dict) -> Any:
"""simple docstring"""
super().__init__(A)
_UpperCAmelCase = XLMRobertaModel(A)
_UpperCAmelCase = nn.Linear(config.hidden_size , config.project_dim)
_UpperCAmelCase = getattr(A , 'has_pre_transformation' , A)
if self.has_pre_transformation:
_UpperCAmelCase = nn.Linear(config.hidden_size , config.project_dim)
_UpperCAmelCase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps)
self.post_init()
def _lowerCamelCase ( self : List[Any] , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , A : Optional[bool] = None , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.base_model(
input_ids=A , attention_mask=A , token_type_ids=A , position_ids=A , head_mask=A , inputs_embeds=A , encoder_hidden_states=A , encoder_attention_mask=A , output_attentions=A , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=A , )
if self.has_pre_transformation:
_UpperCAmelCase = outputs['hidden_states'][-2]
_UpperCAmelCase = self.pre_LN(A)
_UpperCAmelCase = self.transformation_pre(A)
return TransformationModelOutput(
projection_state=A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
_UpperCAmelCase = self.transformation(outputs.last_hidden_state)
return TransformationModelOutput(
projection_state=A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
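# --- Hedged sketch (illustration only; assumes torch is installed) ---
# The wrapper above projects the encoder's hidden states (optionally the
# pre-LayerNorm second-to-last layer) down to `project_dim`. In miniature,
# with toy sizes:
import torch
from torch import nn

hidden_size, project_dim = 16, 8
pre_ln = nn.LayerNorm(hidden_size)
projection = nn.Linear(hidden_size, project_dim)

last_hidden = torch.randn(2, 5, hidden_size)  # (batch, seq_len, hidden)
projection_state = projection(pre_ln(last_hidden))
print(projection_state.shape)  # torch.Size([2, 5, 8])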
| 717
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class __lowerCAmelCase ( A , A ):
UpperCamelCase = '''focalnet'''
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1E-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['stem'] + [F"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
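# --- Hedged sketch (illustration only) ---
# `stage_names` above is ["stem", "stage1", ..., "stageN"]; out_features and
# out_indices are validated and kept aligned against it. A reduced version of
# that alignment (hypothetical stage choice):
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, 5)]
out_features = ["stage2", "stage4"]
out_indices = [stage_names.index(name) for name in out_features]
assert out_indices == [2, 4]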
| 718
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params
UpperCAmelCase__ = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    '''simple docstring'''
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
            ' function.')
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=F"val_{metric}", mode='max', save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    '''simple docstring'''
    return EarlyStopping(
        monitor=F"val_{metric}", mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
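# Illustrative wiring (a sketch, not part of the original module): the factory
# functions above produce standard Lightning callbacks that plug into a Trainer:
#
#     trainer = pl.Trainer(
#         callbacks=[
#             get_checkpoint_callback("outputs" , "rouge2"),
#             get_early_stopping_callback("rouge2" , patience=3),
#         ],
#     )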
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_thumbnail=True , do_align_axis=False , do_pad=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
        """Collects the settings the image-processor tests below exercise."""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self) -> dict:
        """Returns the kwargs used to build the image processor under test."""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = DonutImageProcessor if is_vision_available() else None
    def setUp(self) -> None:
        """Builds the shared tester fixture."""
        self.image_processor_tester = DonutImageProcessingTester(self)
@property
    def image_processor_dict(self) -> dict:
        """Proxies the tester's processor kwargs."""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : Any) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(A , 'do_resize'))
self.assertTrue(hasattr(A , 'size'))
self.assertTrue(hasattr(A , 'do_thumbnail'))
self.assertTrue(hasattr(A , 'do_align_long_axis'))
self.assertTrue(hasattr(A , 'do_pad'))
self.assertTrue(hasattr(A , 'do_normalize'))
self.assertTrue(hasattr(A , 'image_mean'))
self.assertTrue(hasattr(A , 'image_std'))
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 18, 'width': 20})
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
# Previous config had dimensions in (width, height) order
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84))
self.assertEqual(image_processor.size , {'height': 84, 'width': 42})
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
pass
@is_flaky()
def _lowerCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A)
for image in image_inputs:
self.assertIsInstance(A , Image.Image)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A)
for image in image_inputs:
self.assertIsInstance(A , np.ndarray)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def _lowerCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A)
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
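# Minimal usage sketch (illustrative; mirrors what the tests above exercise —
# the processor resizes, pads and normalizes into a `pixel_values` batch):
#
#     from PIL import Image
#     from transformers import DonutImageProcessor
#
#     processor = DonutImageProcessor(size={"height": 18, "width": 20})
#     batch = processor(Image.new("RGB", (400, 300)) , return_tensors="pt")
#     print(batch.pixel_values.shape)  # torch.Size([1, 3, 18, 20])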
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
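# Illustrative round trip (a sketch of what the tests above cover; `<saved_dir>`
# is a placeholder for a directory written by `save_pretrained`):
#
#     tokenizer = MgpstrTokenizer.from_pretrained("<saved_dir>")
#     ids = tokenizer.encode("tester" , add_special_tokens=False)
#     print(tokenizer.convert_ids_to_tokens(ids))  # ['t', 'e', 's', 't', 'e', 'r']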
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    '''Parses the command-line arguments for the ONNX export.'''
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name , device="cpu"):
    '''Loads the model and tokenizer, relaxing BART's generation constraints.'''
    huggingface_model = model_dict[model_name].from_pretrained(model_name ).to(device )
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name )
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model , tokenizer , onnx_file_path , num_beams , max_length):
    '''Exports the scripted beam-search model to ONNX and checks it against PyTorch.'''
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
        summary_ids = model.generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            } , example_outputs=summary_ids , )
        logger.info('Model exported to {}'.format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams ),
                'max_length': np.array(max_length ),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
        logger.info('Model outputs from torch and ONNX Runtime are similar.' )
        logger.info('Success.' )
def main():
    '''Runs the export end to end.'''
    args = parse_args()
    max_length = 5
    num_beams = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model , tokenizer = load_model_tokenizer(args.model_name_or_path , device )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_file_path = args.output_file_path
    else:
        output_file_path = 'BART.onnx'
logger.info('Exporting model to ONNX' )
    export_and_validate_model(model , tokenizer , output_file_path , num_beams , max_length )
if __name__ == "__main__":
main()
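# Example invocation (illustrative; the flags match the argparse definitions
# above, but the script filename here is an assumption):
#
#     python run_onnx_exporter.py \
#         --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 \
#         --output_file_path BART.onnx --device cpu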
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset , **kwargs):
    '''Times a `dataset.map` call (intentionally shadows the builtin).'''
    _ = dataset.map(**kwargs )
@get_duration
def filter(dataset: datasets.Dataset , **kwargs):
    '''Times a `dataset.filter` call (intentionally shadows the builtin).'''
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    '''Times map/filter variants and writes the results to JSON.'''
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , 'dataset.arrow' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=True )
        def tokenize(examples):
            return tokenizer(examples['text'] )
_UpperCAmelCase = map(_UpperCAmelCase )
_UpperCAmelCase = map(_UpperCAmelCase , batched=_UpperCAmelCase )
_UpperCAmelCase = map(_UpperCAmelCase , function=lambda _UpperCAmelCase : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='numpy' ):
_UpperCAmelCase = map(_UpperCAmelCase , function=lambda _UpperCAmelCase : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='pandas' ):
_UpperCAmelCase = map(_UpperCAmelCase , function=lambda _UpperCAmelCase : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='torch' , columns='numbers' ):
_UpperCAmelCase = map(_UpperCAmelCase , function=lambda _UpperCAmelCase : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
_UpperCAmelCase = map(_UpperCAmelCase , function=lambda _UpperCAmelCase : None , batched=_UpperCAmelCase )
_UpperCAmelCase = map(_UpperCAmelCase , function=_UpperCAmelCase , batched=_UpperCAmelCase )
_UpperCAmelCase = filter(_UpperCAmelCase )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , 'wb' ) as f:
        f.write(json.dumps(times ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
def gnome_sort(lst: list) -> list:
    '''Sorts `lst` in place with gnome sort and returns it.'''
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
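# Quick self-check (illustrative, not in the original):
assert gnome_sort([3, 1, 2]) == [1, 2, 3]
assert gnome_sort([]) == []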
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''open-llama'''
def __init__( self : Dict , A : Optional[Any]=10_00_00 , A : Any=40_96 , A : Dict=1_10_08 , A : Optional[Any]=32 , A : List[Any]=32 , A : List[Any]="silu" , A : Optional[Any]=20_48 , A : Dict=0.0_2 , A : Union[str, Any]=1E-6 , A : str=True , A : List[str]=0 , A : Tuple=1 , A : str=2 , A : Dict=False , A : Any=True , A : Any=0.1 , A : Optional[Any]=0.1 , A : int=True , A : Optional[int]=True , A : Optional[int]=None , **A : Optional[Any] , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = rms_norm_eps
_UpperCAmelCase = use_cache
_UpperCAmelCase = kwargs.pop(
'use_memorry_efficient_attention' , A)
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_dropout_prob
_UpperCAmelCase = use_stable_embedding
_UpperCAmelCase = shared_input_output_embedding
_UpperCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , )
def _lowerCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('type' , None)
        rope_scaling_factor = self.rope_scaling.get('factor' , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt"):
    '''Tokenizes a single line, padding/truncating it to max_length.'''
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ):
    '''Removes columns that are populated exclusively by pad_token_id.'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
return [len(A) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
logger = getLogger(__name__)
def A ( _UpperCAmelCase : List[List] ) -> Union[str, Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCAmelCase ) )
def save_git_info(folder_path: str) -> None:
    '''Saves git information to folder_path/git_log.json.'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , 'git_log.json' ) )
def save_json(content , path , indent=4 , **json_dump_kwargs) -> None:
    '''Writes `content` as JSON to `path`.'''
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path):
    '''Reads a JSON file.'''
    with open(path ) as f:
        return json.load(f )
def get_git_info() -> Dict:
    '''Collects the current repo id, commit sha, branch and hostname.'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def A ( _UpperCAmelCase : Callable , _UpperCAmelCase : Iterable ) -> List:
'''simple docstring'''
return list(map(_UpperCAmelCase , _UpperCAmelCase ) )
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'wb' ) as f:
return pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
def normalize_answer(s: str) -> str:
    '''Lowercases text and removes punctuation, articles and extra whitespace.'''
    def remove_articles(text):
        return re.sub(R'\b(a|an|the)\b' , ' ' , text )
    def white_space_fix(text):
        return " ".join(text.split() )
    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction: str , ground_truth: str) -> float:
    '''Token-overlap F1 between a prediction and a reference, after normalization.'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction: str , ground_truth: str) -> bool:
    '''True when prediction and reference are identical after normalization.'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
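# Quick illustration (not in the original): normalization makes both metrics
# ignore case, punctuation and articles, e.g.
#
#     f1_score("The cat sat." , "a cat sat")           # -> 1.0 ("cat sat" vs "cat sat")
#     exact_match_score("The cat sat." , "a cat sat")  # -> True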
def calculate_exact_match(output_lns: List[str] , reference_lns: List[str]) -> Dict:
    '''Returns the corpus-level exact-match rate as {"em": ...}.'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix: str) -> bool:
    '''True for RAG-style model prefixes.'''
    return model_prefix.startswith('rag' )
def set_extra_model_params(extra_params , hparams , config):
    '''Copies supported extra hparams onto the config, handling T5's renamed dropout.'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have a `dropout` param; they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ = pytest.mark.integration
@require_faiss
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]})
return dset
def _lowerCamelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
import faiss
_UpperCAmelCase = self._create_dummy_dataset()
_UpperCAmelCase = dset.map(
            lambda ex , i: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=True , keep_in_memory=True)
_UpperCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT)
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _lowerCamelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
import faiss
_UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
import faiss
_UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _lowerCamelCase ( self : Any) -> Any:
"""simple docstring"""
from elasticsearch import Elasticsearch
_UpperCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
_UpperCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30)
_UpperCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
_UpperCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=A)
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
import faiss
_UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 10)
# single query
_UpperCAmelCase = np.zeros(5 , dtype=np.floataa)
_UpperCAmelCase = 1
_UpperCAmelCase , _UpperCAmelCase = index.search(A)
self.assertRaises(A , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
_UpperCAmelCase = np.eye(5 , dtype=np.floataa)[::-1]
_UpperCAmelCase , _UpperCAmelCase = index.search_batch(A)
self.assertRaises(A , index.search_batch , queries[0])
_UpperCAmelCase = [scores[0] for scores in total_scores]
_UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , A)
def _lowerCamelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
import faiss
_UpperCAmelCase = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
_UpperCAmelCase = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(A):
_UpperCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
import faiss
_UpperCAmelCase = faiss.IndexFlat(5)
_UpperCAmelCase = FaissIndex(custom_index=A)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
import faiss
_UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A) as tmp_file:
index.save(tmp_file.name)
_UpperCAmelCase = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
_UpperCAmelCase = np.zeros(5 , dtype=np.floataa)
_UpperCAmelCase = 1
_UpperCAmelCase , _UpperCAmelCase = index.search(A)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
import faiss
_UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
_UpperCAmelCase = 'index.faiss'
_UpperCAmelCase = F"mock://{index_name}"
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
_UpperCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
_UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
_UpperCAmelCase = 1
_UpperCAmelCase , _UpperCAmelCase = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : int) -> List[str]:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
_UpperCAmelCase = Elasticsearch()
_UpperCAmelCase = {'acknowledged': True}
_UpperCAmelCase = ElasticSearchIndex(es_client=A)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
_UpperCAmelCase = 'foo'
_UpperCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search(A)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
_UpperCAmelCase = 'foo'
_UpperCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search(A , request_timeout=30)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
_UpperCAmelCase = ['foo', 'bar', 'foobar']
_UpperCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search_batch(A)
_UpperCAmelCase = [scores[0] for scores in total_scores]
_UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A) , 0)
self.assertListEqual([1, 1, 1] , A)
# batched queries with timeout
_UpperCAmelCase = ['foo', 'bar', 'foobar']
_UpperCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search_batch(A , request_timeout=30)
_UpperCAmelCase = [scores[0] for scores in total_scores]
_UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A) , 0)
self.assertListEqual([1, 1, 1] , A)
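# Minimal end-to-end sketch of the FaissIndex API exercised above (illustrative;
# requires the optional `faiss` dependency):
#
#     import faiss
#     import numpy as np
#     from datasets.search import FaissIndex
#
#     index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#     index.add_vectors(np.eye(5 , dtype=np.float32))
#     scores, ids = index.search(np.ones(5 , dtype=np.float32))
#     print(ids[0])  # id of the nearest stored vector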
def add(first: int , second: int) -> int:
    '''Adds two non-negative integers using only bitwise operations.

    >>> add(3, 5)
    8
    '''
    while second != 0:
        c = first & second  # carry bits: both operands have a 1 here
        first ^= second     # sum without the carries
        second = c << 1     # shift the carries into the next position
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A )
class __lowerCAmelCase ( A ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
UpperCamelCase = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''text''': Value('''string''' )} )
UpperCamelCase = Features({'''summary''': Value('''string''' )} )
UpperCamelCase = '''text'''
UpperCamelCase = '''summary'''
@property
def _lowerCamelCase ( self : Optional[Any]) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
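# Illustrative (upstream this dataclass is exported as `Summarization` in
# `datasets.tasks`): the column mapping drives dataset column renaming, e.g.
#
#     template = Summarization(text_column="article" , summary_column="highlights")
#     print(template.column_mapping)  # {'article': 'text', 'highlights': 'summary'}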
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str ,
    starting_point: complex ,
    variable: str = "x" ,
    precision: float = 10**-10 ,
    multiplicity: int = 1 ,
) -> complex:
    '''Finds the root of `function` closest to `starting_point` via Newton-Raphson.'''
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('Could not find root' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name , val , spaces=0):
    '''Recursively prints the structure of a (possibly nested) state dict.'''
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , ':' , val.size() )
    else:
        print(msg , ':' , val )
def fix_query_key_value_ordering(param , checkpoint_version , num_splits , num_heads , hidden_size):
    '''Reorders a fused QKV weight/bias saved by older Megatron-LM versions.'''
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    param = param.view(*input_shape )
    return param
def convert_megatron_checkpoint(args , input_state_dict , config):
    '''Converts a Megatron-LM GPT2 state dict to the transformers layout.'''
_UpperCAmelCase = {}
# old versions did not store training args
_UpperCAmelCase = input_state_dict.get('args' , _UpperCAmelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_UpperCAmelCase = ds_args.padded_vocab_size
_UpperCAmelCase = ds_args.max_position_embeddings
_UpperCAmelCase = ds_args.hidden_size
_UpperCAmelCase = ds_args.num_layers
_UpperCAmelCase = ds_args.num_attention_heads
_UpperCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_UpperCAmelCase = config.n_head
# The hidden_size per head.
_UpperCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_UpperCAmelCase = input_state_dict['checkpoint_version']
else:
_UpperCAmelCase = 0.0
# The model.
_UpperCAmelCase = input_state_dict['model']
# The language model.
_UpperCAmelCase = model['language_model']
# The embeddings.
_UpperCAmelCase = lm['embedding']
# The word embeddings.
_UpperCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
_UpperCAmelCase = word_embeddings[: config.vocab_size, :]
_UpperCAmelCase = word_embeddings
# The position embeddings.
_UpperCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_UpperCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
_UpperCAmelCase = pos_embeddings
# The transformer.
_UpperCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
_UpperCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
_UpperCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_UpperCAmelCase = layer_re.match(_UpperCAmelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_UpperCAmelCase = int(m.group(1 ) )
# The name of the operation.
_UpperCAmelCase = m.group(2 )
# Is it a weight or a bias?
_UpperCAmelCase = m.group(3 )
# The name of the layer.
_UpperCAmelCase = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
_UpperCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
_UpperCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_UpperCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
_UpperCAmelCase = torch.tensor(-1E4 , dtype=torch.floataa )
_UpperCAmelCase = masked_bias
_UpperCAmelCase = fix_query_key_value_ordering(_UpperCAmelCase , _UpperCAmelCase , 3 , _UpperCAmelCase , _UpperCAmelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_UpperCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
_UpperCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_UpperCAmelCase = fix_query_key_value_ordering(_UpperCAmelCase , _UpperCAmelCase , 3 , _UpperCAmelCase , _UpperCAmelCase )
# Store. No change of shape.
_UpperCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_UpperCAmelCase = megatron_to_transformers[op_name]
_UpperCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
_UpperCAmelCase = megatron_to_transformers[op_name]
_UpperCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_UpperCAmelCase = transformer['final_layernorm.weight']
_UpperCAmelCase = transformer['final_layernorm.bias']
    # For the LM head, transformers ties the weight matrix to the word embeddings.
_UpperCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def main():
    '''Loads a Megatron-LM GPT2 checkpoint, converts it, and saves HF-format files.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--print-checkpoint-structure' , action='store_true' )
    parser.add_argument(
        'path_to_checkpoint' , type=str , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
    parser.add_argument(
        '--config_file' , default='' , type=str , help='An optional config json file describing the pre-trained model.' , )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
    if args.path_to_checkpoint.endswith('.zip' ):
        with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
            with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict , map_location='cpu' )
    else:
        input_state_dict = torch.load(args.path_to_checkpoint , map_location='cpu' )
    ds_args = input_state_dict.get('args' , None )
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = 'gelu_fast'
            elif ds_args.openai_gelu:
                activation_function = 'gelu_new'
            else:
                activation_function = 'gelu'
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = 'gelu_new'
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=activation_function , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , )
    else:
        config = GPT2Config.from_json_file(args.config_file )
    config.architectures = ['GPT2LMHeadModel']
    # Convert.
    print('Converting' )
    output_state_dict = convert_megatron_checkpoint(args , input_state_dict , config )
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None , output_state_dict )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = 'gpt2'
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
    else:
        tokenizer_model_name = 'gpt2'
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name )
    tokenizer_class = type(tokenizer ).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print('Saving config' )
    config.save_pretrained(basename )
    # Save tokenizer based on args
    print(F"Adding {tokenizer_class} tokenizer files" )
    tokenizer.save_pretrained(basename )
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename , 'pytorch_model.bin' )
    print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
    torch.save(output_state_dict , output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
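# Example invocation (illustrative; the script path appears in the header note
# above and the flags match the argparse setup in main()):
#
#     PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#         --print-checkpoint-structure \
#         /path/to/checkpoint.zip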
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
            if len(logits) != len(target_sizes):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=False)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
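# Note on the post-processing above: when target_sizes is provided, each logits
# map is bilinearly resized to its (height, width) before the per-pixel argmax;
# otherwise the argmax runs at the logits' native resolution and the batched
# result is split into a list of per-image segmentation maps.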
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
UpperCAmelCase__ = re.compile(r"\b(a|an|the)\b", re.UNICODE)
UpperCAmelCase__ = None
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=_UpperCAmelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=_UpperCAmelCase , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def A ( _UpperCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_UpperCAmelCase = bool(qa['answers']['text'] )
return qid_to_has_ans
def A ( _UpperCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
def remove_articles(_UpperCAmelCase : List[str] ):
return ARTICLES_REGEX.sub(' ' , _UpperCAmelCase )
def white_space_fix(_UpperCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(_UpperCAmelCase : List[str] ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCAmelCase : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) )
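# Worked example of the normalization above:
#   "The Quick, Brown Fox!"  ->  "quick brown fox"
# (lower-cased, punctuation stripped, articles removed, whitespace collapsed)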
def A ( _UpperCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
if not s:
return []
return normalize_answer(_UpperCAmelCase ).split()
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
return int(normalize_answer(_UpperCAmelCase ) == normalize_answer(_UpperCAmelCase ) )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = get_tokens(_UpperCAmelCase )
_UpperCAmelCase = get_tokens(_UpperCAmelCase )
_UpperCAmelCase = collections.Counter(_UpperCAmelCase ) & collections.Counter(_UpperCAmelCase )
_UpperCAmelCase = sum(common.values() )
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = 1.0 * num_same / len(_UpperCAmelCase )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
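# Worked example: gold "the cat sat" and prediction "cat sat down" normalize to
# ["cat", "sat"] and ["cat", "sat", "down"], so num_same = 2, precision = 2/3,
# recall = 2/2, and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.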
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = {}
_UpperCAmelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_UpperCAmelCase = qa['id']
_UpperCAmelCase = [t for t in qa['answers']['text'] if normalize_answer(_UpperCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
_UpperCAmelCase = ['']
if qid not in preds:
print(F"Missing prediction for {qid}" )
continue
_UpperCAmelCase = preds[qid]
# Take max over all gold answers
_UpperCAmelCase = max(compute_exact(_UpperCAmelCase , _UpperCAmelCase ) for a in gold_answers )
_UpperCAmelCase = max(compute_fa(_UpperCAmelCase , _UpperCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : int ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
for qid, s in scores.items():
_UpperCAmelCase = na_probs[qid] > na_prob_thresh
if pred_na:
_UpperCAmelCase = float(not qid_to_has_ans[qid] )
else:
_UpperCAmelCase = s
return new_scores
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int=None ) -> Any:
'''simple docstring'''
if not qid_list:
_UpperCAmelCase = len(_UpperCAmelCase )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
_UpperCAmelCase = len(_UpperCAmelCase )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
for k in new_eval:
_UpperCAmelCase = new_eval[k]
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
plt.step(_UpperCAmelCase , _UpperCAmelCase , color='b' , alpha=0.2 , where='post' )
plt.fill_between(_UpperCAmelCase , _UpperCAmelCase , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_UpperCAmelCase )
plt.savefig(_UpperCAmelCase )
plt.clf()
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
    _UpperCAmelCase = sorted(_UpperCAmelCase , key=lambda k : na_probs[k] )
_UpperCAmelCase = 0.0
_UpperCAmelCase = 1.0
_UpperCAmelCase = 0.0
_UpperCAmelCase = [1.0]
_UpperCAmelCase = [0.0]
_UpperCAmelCase = 0.0
for i, qid in enumerate(_UpperCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_UpperCAmelCase = true_pos / float(i + 1 )
_UpperCAmelCase = true_pos / float(_UpperCAmelCase )
if i == len(_UpperCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_UpperCAmelCase )
recalls.append(_UpperCAmelCase )
if out_image:
plot_pr_curve(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return {"ap": 100.0 * avg_prec}
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
if out_image_dir and not os.path.exists(_UpperCAmelCase ):
os.makedirs(_UpperCAmelCase )
_UpperCAmelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_UpperCAmelCase = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
_UpperCAmelCase = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
_UpperCAmelCase = {k: float(_UpperCAmelCase ) for k, v in qid_to_has_ans.items()}
_UpperCAmelCase = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_exact' )
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_f1' )
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_oracle' )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
if not qid_list:
return
_UpperCAmelCase = [na_probs[k] for k in qid_list]
_UpperCAmelCase = np.ones_like(_UpperCAmelCase ) / float(len(_UpperCAmelCase ) )
plt.hist(_UpperCAmelCase , weights=_UpperCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(F"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(_UpperCAmelCase , F"na_prob_hist_{name}.png" ) )
plt.clf()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ) -> Any:
'''simple docstring'''
_UpperCAmelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_UpperCAmelCase = num_no_ans
_UpperCAmelCase = cur_score
_UpperCAmelCase = 0.0
    _UpperCAmelCase = sorted(_UpperCAmelCase , key=lambda k : na_probs[k] )
for i, qid in enumerate(_UpperCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_UpperCAmelCase = scores[qid]
else:
if preds[qid]:
_UpperCAmelCase = -1
else:
_UpperCAmelCase = 0
cur_score += diff
if cur_score > best_score:
_UpperCAmelCase = cur_score
_UpperCAmelCase = na_probs[qid]
return 100.0 * best_score / len(_UpperCAmelCase ), best_thresh
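# The sweep above starts from the score of predicting "" for every question
# (one point per unanswerable qid), then walks qids in order of increasing
# no-answer probability, adding each answerable question's score and
# subtracting one whenever an unanswerable question got a non-empty prediction;
# the running maximum fixes best_score and best_thresh.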
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = best_exact
_UpperCAmelCase = exact_thresh
_UpperCAmelCase = best_fa
_UpperCAmelCase = fa_thresh
def A ( ) -> Optional[Any]:
'''simple docstring'''
with open(OPTS.data_file ) as f:
        _UpperCAmelCase = json.load(f )
_UpperCAmelCase = dataset_json['data']
with open(OPTS.pred_file ) as f:
        _UpperCAmelCase = json.load(f )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
            _UpperCAmelCase = json.load(f )
else:
_UpperCAmelCase = {k: 0.0 for k in preds}
_UpperCAmelCase = make_qid_to_has_ans(_UpperCAmelCase ) # maps qid to True/False
_UpperCAmelCase = [k for k, v in qid_to_has_ans.items() if v]
_UpperCAmelCase = [k for k, v in qid_to_has_ans.items() if not v]
_UpperCAmelCase , _UpperCAmelCase = get_raw_scores(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh )
_UpperCAmelCase = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh )
_UpperCAmelCase = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase )
if has_ans_qids:
_UpperCAmelCase = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase )
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'HasAns' )
if no_ans_qids:
_UpperCAmelCase = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase )
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir )
histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
else:
print(json.dumps(_UpperCAmelCase , indent=2 ) )
if __name__ == "__main__":
UpperCAmelCase__ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [0]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
_UpperCAmelCase = [60]
_UpperCAmelCase = [10]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 3
_UpperCAmelCase = [1, 2, 3]
_UpperCAmelCase = [3, 2, 1]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 5)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 50
_UpperCAmelCase = [60, 1_00, 1_20]
_UpperCAmelCase = [10, 20, 30]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 2_20)
if __name__ == "__main__":
unittest.main()
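# A minimal reference sketch of the recursive 0/1 knapsack these tests exercise.
# The signature knapsack(capacity, weights, values, counter) is an assumption
# inferred from the call sites above, not taken from the `knapsack` module itself.
def _knapsack_sketch(capacity: int, weights: list, values: list, counter: int) -> int:
    # Base case: no items left to consider or no remaining capacity.
    if counter == 0 or capacity == 0:
        return 0
    # The current item does not fit: skip it.
    if weights[counter - 1] > capacity:
        return _knapsack_sketch(capacity, weights, values, counter - 1)
    # Otherwise, take the better of including or excluding the current item.
    return max(
        values[counter - 1] + _knapsack_sketch(capacity - weights[counter - 1], weights, values, counter - 1),
        _knapsack_sketch(capacity, weights, values, counter - 1),
    )
# For example, _knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3) returns 220,
# matching the expected value in the last test above.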
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import qiskit
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_UpperCAmelCase = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( A ):
def __init__( self : int , A : VQModel , A : UNetaDModel , A : DDIMScheduler) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=A , unet=A , scheduler=A)
@torch.no_grad()
def __call__( self : Optional[int] , A : int = 1 , A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A : float = 0.0 , A : int = 50 , A : Optional[str] = "pil" , A : bool = True , **A : Union[str, Any] , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
_UpperCAmelCase = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=A , )
_UpperCAmelCase = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(A)
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
_UpperCAmelCase = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for t in self.progress_bar(self.scheduler.timesteps):
_UpperCAmelCase = self.scheduler.scale_model_input(A , A)
# predict the noise residual
_UpperCAmelCase = self.unet(A , A).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(A , A , A , **A).prev_sample
# decode the image latents with the VAE
_UpperCAmelCase = self.vqvae.decode(A).sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1)
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(A)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A)
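# A minimal usage sketch (the model id below is an assumption, not part of this
# file):
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]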
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
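# Example invocation (the script file name and paths are placeholders):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output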
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 5_00
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A) as mock_head:
_UpperCAmelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 5_00
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = GPTaTokenizerFast.from_pretrained('gpt2')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A) as mock_head:
_UpperCAmelCase = GPTaTokenizerFast.from_pretrained('gpt2')
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = tempfile.mktemp()
with open(A , 'wb') as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A)
_UpperCAmelCase = AlbertTokenizer.from_pretrained(A)
finally:
os.remove(A)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json'):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb') as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A)
_UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 10_00)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json')
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _lowerCamelCase ( cls : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = TOKEN
HfFolder.save_token(A)
@classmethod
def _lowerCamelCase ( cls : List[str]) -> Dict:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer')
except HTTPError:
pass
def _lowerCamelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(A , 'vocab.txt')
with open(A , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
_UpperCAmelCase = BertTokenizer(A)
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token)
_UpperCAmelCase = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer")
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A , repo_id='test-tokenizer' , push_to_hub=A , use_auth_token=self._token)
_UpperCAmelCase = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer")
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
def _lowerCamelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(A , 'vocab.txt')
with open(A , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
_UpperCAmelCase = BertTokenizer(A)
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token)
_UpperCAmelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A , use_auth_token=self._token)
_UpperCAmelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
@require_tokenizers
def _lowerCamelCase ( self : int) -> int:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(A , 'vocab.txt')
with open(A , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
_UpperCAmelCase = CustomTokenizer(A)
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
_UpperCAmelCase = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=A)
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(A , 'vocab.txt')
with open(A , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
_UpperCAmelCase = BertTokenizerFast.from_pretrained(A)
bert_tokenizer.save_pretrained(A)
_UpperCAmelCase = CustomTokenizerFast.from_pretrained(A)
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
_UpperCAmelCase = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=A)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast')
_UpperCAmelCase = AutoTokenizer.from_pretrained(
F"{USER}/test-dynamic-tokenizer" , use_fast=A , trust_remote_code=A)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = Trie()
trie.add('Hello 友達')
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
trie.add('Hello')
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS] This is a extra_id_100'])
trie.add('[CLS]')
trie.add('extra_id_1')
trie.add('extra_id_100')
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS]', ' This is a ', 'extra_id_100'])
def _lowerCamelCase ( self : str) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = Trie()
trie.add('A')
self.assertEqual(trie.split('ABC') , ['A', 'BC'])
self.assertEqual(trie.split('BCA') , ['BC', 'A'])
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
_UpperCAmelCase = Trie()
trie.add('TOKEN]')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def _lowerCamelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = Trie()
trie.add('A')
trie.add('P')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def _lowerCamelCase ( self : Dict) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = Trie()
trie.add('AB')
trie.add('B')
trie.add('C')
self.assertEqual(trie.split('ABC') , ['AB', 'C'])
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = Trie()
trie.add('ABC')
trie.add('B')
trie.add('CD')
self.assertEqual(trie.split('ABCD') , ['ABC', 'D'])
def _lowerCamelCase ( self : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = Trie()
_UpperCAmelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3])
self.assertEqual(A , ['AB', 'C'])
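# The Trie here (transformers.tokenization_utils.Trie) does longest-match
# splitting: split() scans left to right and cuts at the longest added token
# starting from each position, which is why "ABCD" with tokens {"ABC", "B", "CD"}
# yields ["ABC", "D"] rather than ["A", "B", "CD"].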
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
import socket
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
_UpperCAmelCase = socket.gethostname()
_UpperCAmelCase = 12_312
sock.connect((host, port) )
sock.send(B'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
_UpperCAmelCase = sock.recv(1_024 )
if not data:
break
out_file.write(_UpperCAmelCase )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
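# A minimal sketch of a matching file-sending server (an assumption: no server
# code ships with this snippet, and "File_to_send" is a placeholder filename):
# bind the same port, accept one client, print its greeting, then stream the
# file back in 1 KiB chunks until EOF.
def _serve_file_once(path: str = "File_to_send", port: int = 12_312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1_024).decode())  # expected to print "Hello server!"
    with open(path, "rb") as in_file:
        while chunk := in_file.read(1_024):
            conn.send(chunk)
    conn.close()
    server.close()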
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A ( ) -> List[str]:
'''simple docstring'''
    _UpperCAmelCase = os.path.dirname(os.path.realpath(__file__ ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
_UpperCAmelCase = ''
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_UpperCAmelCase = [
word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(solution())
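# Worked example: "SKY" scores 19 + 11 + 25 = 55 = 0.5 * 10 * 11, the 10th
# triangular number, so "SKY" counts as a triangle word.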
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class __lowerCAmelCase ( A ):
UpperCamelCase = 4_2
UpperCamelCase = None
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Any=0.999 , _UpperCAmelCase : int="cosine" , ) -> str:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCAmelCase : int ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCAmelCase : Tuple ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
_UpperCAmelCase = []
for i in range(_UpperCAmelCase ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCAmelCase ) / alpha_bar_fn(_UpperCAmelCase ) , _UpperCAmelCase ) )
return torch.tensor(_UpperCAmelCase , dtype=torch.floataa )
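# For example, with the default cosine transform each beta is
# 1 - alpha_bar((i + 1) / N) / alpha_bar(i / N), clipped at max_beta, so the
# cumulative product of alphas follows the squared-cosine curve
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, as in the DDPM
# cosine schedule.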
class __lowerCAmelCase ( A , A ):
@register_to_config
def __init__( self : int , A : int = 10_00 , A : str = "fixed_small_log" , A : bool = True , A : Optional[float] = 1.0 , A : str = "epsilon" , A : str = "squaredcos_cap_v2" , ) -> Any:
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'')
_UpperCAmelCase = betas_for_alpha_bar(A)
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0)
_UpperCAmelCase = torch.tensor(1.0)
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = torch.from_numpy(np.arange(0 , A)[::-1].copy())
_UpperCAmelCase = variance_type
def _lowerCamelCase ( self : Optional[int] , A : torch.FloatTensor , A : Optional[int] = None) -> torch.FloatTensor:
"""simple docstring"""
return sample
def _lowerCamelCase ( self : Optional[int] , A : int , A : Union[str, torch.device] = None) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_UpperCAmelCase = (np.arange(0 , A) * step_ratio).round()[::-1].copy().astype(np.intaa)
_UpperCAmelCase = torch.from_numpy(A).to(A)
def _lowerCamelCase ( self : Tuple , A : List[str] , A : Dict=None , A : Optional[Any]=None , A : Union[str, Any]=None) -> List[str]:
"""simple docstring"""
if prev_timestep is None:
_UpperCAmelCase = t - 1
_UpperCAmelCase = self.alphas_cumprod[t]
_UpperCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_UpperCAmelCase = 1 - alpha_prod_t
_UpperCAmelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_UpperCAmelCase = self.betas[t]
else:
_UpperCAmelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_UpperCAmelCase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_UpperCAmelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_UpperCAmelCase = torch.log(torch.clamp(A , min=1E-20))
_UpperCAmelCase = torch.exp(0.5 * variance)
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_UpperCAmelCase = variance.log()
_UpperCAmelCase = beta.log()
_UpperCAmelCase = (predicted_variance + 1) / 2
_UpperCAmelCase = frac * max_log + (1 - frac) * min_log
return variance
def _lowerCamelCase ( self : int , A : torch.FloatTensor , A : int , A : torch.FloatTensor , A : Optional[int] = None , A : Any=None , A : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
"""simple docstring"""
_UpperCAmelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_UpperCAmelCase , _UpperCAmelCase = torch.split(A , sample.shape[1] , dim=1)
else:
_UpperCAmelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
_UpperCAmelCase = t - 1
_UpperCAmelCase = self.alphas_cumprod[t]
_UpperCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_UpperCAmelCase = 1 - alpha_prod_t
_UpperCAmelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_UpperCAmelCase = self.betas[t]
_UpperCAmelCase = self.alphas[t]
else:
_UpperCAmelCase = 1 - alpha_prod_t / alpha_prod_t_prev
_UpperCAmelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
' for the UnCLIPScheduler.')
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_UpperCAmelCase = torch.clamp(
A , -self.config.clip_sample_range , self.config.clip_sample_range)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCAmelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_UpperCAmelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_UpperCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_UpperCAmelCase = 0
if t > 0:
_UpperCAmelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=A , device=model_output.device)
_UpperCAmelCase = self._get_variance(
A , predicted_variance=A , prev_timestep=A , )
if self.variance_type == "fixed_small_log":
_UpperCAmelCase = variance
elif self.variance_type == "learned_range":
_UpperCAmelCase = (0.5 * variance).exp()
else:
raise ValueError(
F"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
' for the UnCLIPScheduler.')
_UpperCAmelCase = variance * variance_noise
_UpperCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=A , pred_original_sample=A)
def _lowerCamelCase ( self : Optional[Any] , A : torch.FloatTensor , A : torch.FloatTensor , A : torch.IntTensor , ) -> torch.FloatTensor:
"""simple docstring"""
_UpperCAmelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype)
_UpperCAmelCase = timesteps.to(original_samples.device)
_UpperCAmelCase = alphas_cumprod[timesteps] ** 0.5
_UpperCAmelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
_UpperCAmelCase = sqrt_alpha_prod.unsqueeze(-1)
_UpperCAmelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
_UpperCAmelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
_UpperCAmelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1)
_UpperCAmelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
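# The add_noise method above is the standard DDPM forward process,
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# with the per-timestep scalars flattened and unsqueezed so they broadcast over
# the sample's trailing dimensions.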
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
    '''
    >>> A(25, 32)
    '0b111001'
    >>> A(37, 50)
    '0b010111'
    '''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(A):
copyfile(self.vocab_file , A)
return (out_vocab_file,)
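# Note: XLNet puts its special tokens at the end, so a single sequence is
# encoded as "X <sep> <cls>"; token type ids are 0 for the sequence and its
# <sep>, and 2 for the trailing <cls>. A pair becomes "A <sep> B <sep> <cls>"
# with type ids 0 for the first segment, 1 for the second, and 2 for <cls>.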
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
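# Worked example: "momo" has character counts {m: 2, o: 2} (no odd counts), so
# it can be rearranged into the palindrome "moom"; "abc" has three odd counts
# and cannot be rearranged into any palindrome.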
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=A )
class __lowerCAmelCase ( A ):
UpperCamelCase = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''audio''': Audio()} )
UpperCamelCase = Features({'''transcription''': Value('''string''' )} )
UpperCamelCase = '''audio'''
UpperCamelCase = '''transcription'''
def _lowerCamelCase ( self : Union[str, Any] , A : List[Any]) -> List[str]:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column] , A):
raise ValueError(F"Column {self.audio_column} is not an Audio type.")
_UpperCAmelCase = copy.deepcopy(self)
_UpperCAmelCase = self.input_schema.copy()
_UpperCAmelCase = features[self.audio_column]
_UpperCAmelCase = input_schema
return task_template
@property
def _lowerCamelCase ( self : str) -> Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
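# A minimal usage sketch (column names are hypothetical; upstream this template
# is datasets' AutomaticSpeechRecognition class):
#   task = AutomaticSpeechRecognition(audio_column="file", transcription_column="sentence")
#   task.column_mapping  # {"file": "audio", "sentence": "transcription"}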
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
        _UpperCAmelCase = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
_UpperCAmelCase = refs
return Dataset.from_dict(_UpperCAmelCase )
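# Note: each line of the ref file is assumed to be a JSON-encoded list of
# sub-token indices marking whole-word boundaries (the format produced by the
# Chinese whole-word-masking preprocessing scripts); the format is not
# validated here.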
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
                'Use --overwrite_output_dir to overcome this.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
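    # When pad_to_max_length is unset, padding stays False here and the data
    # collator below pads each batch dynamically to its longest member.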
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing them
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
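    # DataCollatorForWholeWordMask masks all sub-tokens of a word together; when
    # a `chinese_ref` column is present, it is used to recover word boundaries.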
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(A , 'hidden_sizes'))
self.parent.assertTrue(hasattr(A , 'num_attention_heads'))
self.parent.assertTrue(hasattr(A , 'num_encoder_blocks'))
class __lowerCAmelCase :
    def __init__( self : Optional[Any] , A : Dict , A : List[str]=13 , A : Any=64 , A : Dict=3 , A : Union[str, Any]=4 , A : Optional[int]=[2, 2, 2, 2] , A : Tuple=[8, 4, 2, 1] , A : Union[str, Any]=[16, 32, 64, 128] , A : str=[1, 4, 8, 16] , A : List[str]=[1, 2, 4, 8] , A : Tuple=True , A : str=True , A : int="gelu" , A : int=0.1 , A : List[Any]=0.1 , A : Dict=0.02 , A : str=3 , A : Dict=None , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_encoder_blocks
_UpperCAmelCase = sr_ratios
_UpperCAmelCase = depths
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = downsampling_rates
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
def _lowerCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Dict) -> str:
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , A : Union[str, Any] , A : str , A : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = SegformerModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
_UpperCAmelCase = _UpperCAmelCase = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def _lowerCamelCase ( self : Any , A : Dict , A : int , A : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = SegformerForSemanticSegmentation(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
_UpperCAmelCase = model(A , labels=A)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def _lowerCamelCase ( self : Optional[Any] , A : Optional[Any] , A : Dict , A : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = 1
_UpperCAmelCase = SegformerForSemanticSegmentation(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(A)
_UpperCAmelCase = model(A , labels=A)
self.parent.assertGreater(result.loss , 0.0)
def _lowerCamelCase ( self : Any) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
_UpperCAmelCase = SegformerModelTester(self)
_UpperCAmelCase = SegformerConfigTester(self , config_class=A)
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*A)
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*A)
@unittest.skip('SegFormer does not use inputs_embeds')
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
def _lowerCamelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
pass
def _lowerCamelCase ( self : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
_UpperCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A)
def _lowerCamelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.attentions
_UpperCAmelCase = sum(self.model_tester.depths)
self.assertEqual(len(A) , A)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(A) , A)
# verify the first attentions (first block, first layer)
_UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
_UpperCAmelCase = (self.model_tester.image_size // 32) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
_UpperCAmelCase = len(A)
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
self.assertEqual(out_len + 1 , len(A))
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(A) , A)
# verify the first attentions (first block, first layer)
_UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(A : str , A : List[Any] , A : str):
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(A) , A)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
def _lowerCamelCase ( self : str) -> Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(A):
continue
_UpperCAmelCase = model_class(A)
model.to(A)
model.train()
_UpperCAmelCase = self._prepare_for_class(A , A , return_labels=A)
_UpperCAmelCase = model(**A).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def _lowerCamelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = SegformerModel.from_pretrained(A)
self.assertIsNotNone(A)
def A ( ) -> int:
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=A , align=A , do_random_crop=A)
_UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
A)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt')
_UpperCAmelCase = encoded_inputs.pixel_values.to(A)
with torch.no_grad():
_UpperCAmelCase = model(A)
        _UpperCAmelCase = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = torch.tensor(
[
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
]).to(A)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-4))
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=A , align=A , do_random_crop=A)
_UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(A)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt')
_UpperCAmelCase = encoded_inputs.pixel_values.to(A)
with torch.no_grad():
_UpperCAmelCase = model(A)
        _UpperCAmelCase = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = torch.tensor(
[
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
]).to(A)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-1))
@slow
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=A , align=A , do_random_crop=A)
_UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
A)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt')
_UpperCAmelCase = encoded_inputs.pixel_values.to(A)
with torch.no_grad():
_UpperCAmelCase = model(A)
_UpperCAmelCase = outputs.logits.detach().cpu()
        _UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(500, 300)])
        _UpperCAmelCase = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape , A)
        _UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A)
        _UpperCAmelCase = torch.Size((128, 128))
self.assertEqual(segmentation[0].shape , A)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
        _UpperCAmelCase = bool(self.vocab_file)
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(A):
copyfile(self.vocab_file , A)
return (out_vocab_file,)
def A ( _UpperCAmelCase : int ) -> int:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('multiplicative_persistence() only accepts integral values' )
if num < 0:
raise ValueError('multiplicative_persistence() does not accept negative values' )
_UpperCAmelCase = 0
_UpperCAmelCase = str(_UpperCAmelCase )
while len(_UpperCAmelCase ) != 1:
_UpperCAmelCase = [int(_UpperCAmelCase ) for i in num_string]
_UpperCAmelCase = 1
for i in range(0 , len(_UpperCAmelCase ) ):
total *= numbers[i]
_UpperCAmelCase = str(_UpperCAmelCase )
steps += 1
return steps
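# Worked example: multiplicative_persistence(39) == 3, since 39 -> 27 -> 14 -> 4.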
def A ( _UpperCAmelCase : int ) -> int:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('additive_persistence() only accepts integral values' )
if num < 0:
raise ValueError('additive_persistence() does not accept negative values' )
_UpperCAmelCase = 0
_UpperCAmelCase = str(_UpperCAmelCase )
while len(_UpperCAmelCase ) != 1:
_UpperCAmelCase = [int(_UpperCAmelCase ) for i in num_string]
_UpperCAmelCase = 0
for i in range(0 , len(_UpperCAmelCase ) ):
total += numbers[i]
_UpperCAmelCase = str(_UpperCAmelCase )
steps += 1
return steps
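# Worked example: additive_persistence(199) == 3, since 199 -> 19 -> 10 -> 1.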
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
UpperCAmelCase__ = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]
def A ( _UpperCAmelCase : int ) -> int:
'''simple docstring'''
_UpperCAmelCase = 0
while number:
        # Speed is improved slightly by checking five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
number //= 100_000
return sum_of_digits_squared
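# Worked example: for 1_234_567 the loop performs just two table lookups,
# DIGITS_SQUARED[34567] and DIGITS_SQUARED[12], instead of seven per-digit steps.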
# Two chains exist:
# one ends at 89, and seeding it via the chain member 58 minimizes the number of
# iterations needed to check all the members;
# the other ends at 1 and contains only the single element 1.
# So 58 and 1 are seeded at the start.
# A flat array is used instead of a dictionary to speed up lookups
UpperCAmelCase__ = [None] * 10_000_000
UpperCAmelCase__ = True
UpperCAmelCase__ = False
def A ( _UpperCAmelCase : int ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_UpperCAmelCase = chain(next_number(_UpperCAmelCase ) )
_UpperCAmelCase = number_chain
while number < 10_000_000:
_UpperCAmelCase = number_chain
number *= 10
return number_chain
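# Example: the chain for 85 reaches 8**2 + 5**2 == 89 immediately; the while-loop
# also memoizes 850, 8500, ... since trailing zeros do not change the
# digit-square sum.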
def A ( _UpperCAmelCase : int = 10_000_000 ) -> int:
'''simple docstring'''
for i in range(1 , _UpperCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 714
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase__ = re.compile(r"\s+")
def A ( _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = [len(_UpperCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )}
def A ( _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=5 ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ['auto-generated', 'autogenerated', 'automatically generated']
_UpperCAmelCase = example['content'].splitlines()
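    # The `else` below belongs to the `for` loop: it runs once the scanned lines
    # contain none of the keywords.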
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Optional[int]=0.05 ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['unit tests', 'test file', 'configuration file']
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# first test
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_UpperCAmelCase = example['content'].count('\n' )
_UpperCAmelCase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['def ', 'class ', 'for ', 'while ']
_UpperCAmelCase = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=4 ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids']
_UpperCAmelCase = len(example['content'] ) / len(_UpperCAmelCase )
return {"ratio": ratio}
def A ( _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {}
results.update(get_hash(_UpperCAmelCase ) )
results.update(line_stats(_UpperCAmelCase ) )
results.update(alpha_stats(_UpperCAmelCase ) )
results.update(char_token_ratio(_UpperCAmelCase ) )
results.update(is_autogenerated(_UpperCAmelCase ) )
results.update(is_config_or_test(_UpperCAmelCase ) )
results.update(has_no_keywords(_UpperCAmelCase ) )
results.update(has_few_assignments(_UpperCAmelCase ) )
return results
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
with open(_UpperCAmelCase , 'rb' ) as f_in:
with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
os.unlink(_UpperCAmelCase )
# Settings
UpperCAmelCase__ = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCAmelCase__ = set(ds.unique("hash"))
UpperCAmelCase__ = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase__ = time.time()
UpperCAmelCase__ , UpperCAmelCase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCAmelCase__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
UpperCAmelCase__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase__ = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCAmelCase__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __lowerCAmelCase :
def __init__( self : int , A : Optional[Any]=2 , A : List[str]=3 , A : Optional[int]=64 , A : Union[str, Any]=None) -> Dict:
"""simple docstring"""
_UpperCAmelCase = np.random.default_rng(A)
_UpperCAmelCase = length
        _UpperCAmelCase = rng.normal(size=(length,)).astype(np.float32)
        _UpperCAmelCase = a * self.x + b + rng.normal(scale=0.1 , size=(length,)).astype(np.float32)
def __len__( self : Tuple) -> List[Any]:
"""simple docstring"""
return self.length
def __getitem__( self : Union[str, Any] , A : Tuple) -> List[Any]:
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class __lowerCAmelCase ( torch.nn.Module ):
def __init__( self : int , A : List[str]=0 , A : Any=0 , A : Optional[int]=False) -> Tuple:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = torch.nn.Parameter(torch.tensor([2, 3]).float())
_UpperCAmelCase = torch.nn.Parameter(torch.tensor([2, 3]).float())
_UpperCAmelCase = True
def _lowerCamelCase ( self : int , A : Dict=None) -> int:
"""simple docstring"""
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
_UpperCAmelCase = False
return x * self.a[0] + self.b[0]
class __lowerCAmelCase ( torch.nn.Module ):
def __init__( self : Any , A : Optional[Any]=0 , A : Any=0 , A : List[Any]=False) -> Dict:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = torch.nn.Parameter(torch.tensor(A).float())
_UpperCAmelCase = torch.nn.Parameter(torch.tensor(A).float())
_UpperCAmelCase = True
def _lowerCamelCase ( self : Optional[Any] , A : Optional[int]=None) -> str:
"""simple docstring"""
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
_UpperCAmelCase = False
return x * self.a + self.b
def _A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : int = 16 ) -> Optional[int]:
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
_UpperCAmelCase = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
_UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase )
_UpperCAmelCase = datasets['train'].unique('label' )
_UpperCAmelCase = {v: i for i, v in enumerate(_UpperCAmelCase )}
def tokenize_function(_UpperCAmelCase : List[str] ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
if "label" in examples:
_UpperCAmelCase = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(_UpperCAmelCase : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=2 )
_UpperCAmelCase = DataLoader(tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
import re
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_conv_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
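            # Flatten the nested `model.{i}.{j}` position into a single
            # downsample_block index (two sub-modules per model entry).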
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_conv_in.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_encoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_proj_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_proj_out.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_decoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_prior_cond_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# keep original key
else:
_UpperCAmelCase = original_key
_UpperCAmelCase = replace_key(_UpperCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_UpperCAmelCase = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_UpperCAmelCase = original_key
_UpperCAmelCase = original_key
_UpperCAmelCase = value
return new_dict
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
'''simple docstring'''
# Load configuration defined in the metadata file
with open(_UpperCAmelCase ) as metadata_file:
_UpperCAmelCase = json.load(_UpperCAmelCase )
_UpperCAmelCase = LukeConfig(use_entity_aware_attention=_UpperCAmelCase , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )['module']
# Load the entity vocab file
_UpperCAmelCase = load_original_entity_vocab(_UpperCAmelCase )
# add an entry for [MASK2]
_UpperCAmelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase = AddedToken('<ent>' , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )
_UpperCAmelCase = AddedToken('<ent2>' , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'r' ) as f:
_UpperCAmelCase = json.load(_UpperCAmelCase )
_UpperCAmelCase = 'MLukeTokenizer'
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_UpperCAmelCase )
# Initialize the embeddings of the special tokens
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['@'] )[0]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['#'] )[0]
_UpperCAmelCase = state_dict['embeddings.word_embeddings.weight']
_UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCAmelCase = state_dict[bias_name]
_UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase = F"encoder.layer.{layer_index}.attention.self."
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase = state_dict['entity_embeddings.entity_embeddings.weight']
_UpperCAmelCase = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCAmelCase = state_dict['entity_predictions.bias']
_UpperCAmelCase = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCAmelCase = LukeForMaskedLM(config=_UpperCAmelCase ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
_UpperCAmelCase = state_dict[key]
else:
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
if set(_UpperCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(_UpperCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_UpperCAmelCase , task='entity_classification' )
_UpperCAmelCase = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
_UpperCAmelCase = (0, 9)
_UpperCAmelCase = tokenizer(_UpperCAmelCase , entity_spans=[span] , return_tensors='pt' )
_UpperCAmelCase = model(**_UpperCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 33, 768) )
_UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 1, 768) )
_UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = 'Tokyo is the capital of <mask>.'
_UpperCAmelCase = (24, 30)
_UpperCAmelCase = tokenizer(_UpperCAmelCase , entity_spans=[span] , return_tensors='pt' )
_UpperCAmelCase = model(**_UpperCAmelCase )
_UpperCAmelCase = encoding['input_ids'][0].tolist()
_UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
_UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_UpperCAmelCase )
_UpperCAmelCase = outputs.entity_logits[0][0].argmax().item()
_UpperCAmelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(_UpperCAmelCase ) )
model.save_pretrained(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = ['[MASK]', '[PAD]', '[UNK]']
_UpperCAmelCase = [json.loads(line ) for line in open(_UpperCAmelCase )]
_UpperCAmelCase = {}
for entry in data:
_UpperCAmelCase = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCAmelCase = entity_id
break
_UpperCAmelCase = F"{language}:{entity_name}"
_UpperCAmelCase = entity_id
return new_mapping
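# Illustrative (hypothetical) line of the JSONL entity vocab parsed above:
#   {"id": 7, "entities": [["Japan", "en"], ["Япония", "ru"]]}
# Special tokens keep their bare name; every other alias is keyed as
# "{language}:{entity_name}", so this entry maps "en:Japan" and "ru:Япония" to 7.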
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
UpperCAmelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
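# Example invocation (hypothetical script name and paths):
#   python convert_mluke_checkpoint.py --checkpoint_path pytorch_model.bin \
#       --metadata_path metadata.json --entity_vocab_path entity_vocab.jsonl \
#       --pytorch_dump_folder_path mluke-base-converted --model_size base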
| 716
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 639
| 0
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __lowerCAmelCase ( A ):
def __init__( self : int , A : int = 1_01) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = length
def __len__( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
return self.length
def __getitem__( self : Tuple , A : Union[str, Any]) -> int:
"""simple docstring"""
return i
class __lowerCAmelCase :
def __call__( self : Optional[Any] , A : str) -> Union[str, Any]:
"""simple docstring"""
return {"input_ids": torch.tensor(A), "labels": torch.tensor(A)}
class __lowerCAmelCase ( nn.Module ):
def __init__( self : int) -> Tuple:
"""simple docstring"""
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_UpperCAmelCase = nn.Linear(1_20 , 80)
def _lowerCamelCase ( self : int , A : str , A : Tuple=None) -> List[str]:
"""simple docstring"""
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class __lowerCAmelCase ( A ):
@require_torch_neuroncore
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = F"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F"--output_dir {output_dir}".split()
_UpperCAmelCase = ['torchrun'] + distributed_args + args
execute_subprocess_async(A , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class __lowerCAmelCase ( A ):
@require_torch_multi_gpu
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = F"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F"--output_dir {output_dir}".split()
_UpperCAmelCase = ['torchrun'] + distributed_args + args
execute_subprocess_async(A , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
UpperCAmelCase__ : str = HfArgumentParser((TrainingArguments,))
UpperCAmelCase__ : Dict = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
UpperCAmelCase__ : Any = DummyDataset(dataset_length)
def A ( _UpperCAmelCase : EvalPrediction ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = list(range(len(_UpperCAmelCase ) ) )
_UpperCAmelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
F"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" )
return {"success": success}
UpperCAmelCase__ : Optional[int] = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
UpperCAmelCase__ : List[Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
UpperCAmelCase__ : str = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
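# Note (reconstruction, an assumption about the original source): the bare assignment
# below originally set trainer.args.eval_accumulation_steps = 2, so evaluate/predict is
# repeated with eval outputs accumulated in chunks of two steps; the final bare
# assignment at the end resets it to None.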
UpperCAmelCase__ : Union[str, Any] = 2
UpperCAmelCase__ : Dict = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
UpperCAmelCase__ : Tuple = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
UpperCAmelCase__ : List[str] = None
| 717
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
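# Usage sketch (an assumption about the surrounding package layout, not part of the
# original file): because of _LazyModule, `from transformers import CTRLConfig` resolves
# the name lazily, and torch/TensorFlow are only imported once a model class listed in
# the import structure above is actually accessed.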
| 639
| 0
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
UpperCAmelCase__ = TypeVar("KT")
UpperCAmelCase__ = TypeVar("VT")
class __lowerCAmelCase ( Generic[KT, VT] ):
def __init__( self : Optional[int] , A : KT | str = "root" , A : VT | None = None) -> str:
"""simple docstring"""
_UpperCAmelCase = key
_UpperCAmelCase = value
_UpperCAmelCase = []
def __repr__( self : Optional[int]) -> str:
"""simple docstring"""
return F"Node({self.key}: {self.value})"
@property
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
return len(self.forward)
class __lowerCAmelCase ( Generic[KT, VT] ):
def __init__( self : Dict , A : float = 0.5 , A : int = 16) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = Node[KT, VT]()
_UpperCAmelCase = 0
_UpperCAmelCase = p
_UpperCAmelCase = max_level
def __str__( self : Optional[Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = list(self)
if len(items) == 0:
return F"SkipList(level={self.level})"
_UpperCAmelCase = max((len(str(item)) for item in items) , default=4)
_UpperCAmelCase = max(label_size , 4) + 4
_UpperCAmelCase = self.head
_UpperCAmelCase = []
_UpperCAmelCase = node.forward.copy()
lines.append(F"[{node.key}]".ljust(label_size , '-') + '* ' * len(forwards))
lines.append(' ' * label_size + '| ' * len(forwards))
while len(node.forward) != 0:
_UpperCAmelCase = node.forward[0]
lines.append(
F"[{node.key}]".ljust(label_size , '-')
+ ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards))
lines.append(' ' * label_size + '| ' * len(forwards))
_UpperCAmelCase = node.forward
lines.append('None'.ljust(label_size) + '* ' * len(forwards))
return F"SkipList(level={self.level})\n" + "\n".join(lines)
def __iter__( self : List[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.head
while len(node.forward) != 0:
yield node.forward[0].key
_UpperCAmelCase = node.forward[0]
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
_UpperCAmelCase = 1
while random() < self.p and level < self.max_level:
level += 1
return level
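# The level sampler above draws from a geometric distribution truncated at max_level:
# P(level == k) = p ** (k - 1) * (1 - p) for k < max_level, so with the default
# p = 0.5 about half of all nodes keep a single forward pointer.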
def _lowerCamelCase ( self : List[str] , A : str) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = self.head
for i in reversed(range(self.level)):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
_UpperCAmelCase = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(node)
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def _lowerCamelCase ( self : List[str] , A : KT) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self._locate_node(A)
if node is not None:
for i, update_node in enumerate(A):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_UpperCAmelCase = node.forward[i]
else:
_UpperCAmelCase = update_node.forward[:i]
def _lowerCamelCase ( self : List[Any] , A : KT , A : VT) -> int:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self._locate_node(A)
if node is not None:
_UpperCAmelCase = value
else:
_UpperCAmelCase = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , level):
update_vector.append(self.head)
_UpperCAmelCase = level
_UpperCAmelCase = Node(A , A)
for i, update_node in enumerate(update_vector[:level]):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i])
if update_node.level < i + 1:
update_node.forward.append(new_node)
else:
_UpperCAmelCase = new_node
def _lowerCamelCase ( self : List[Any] , A : VT) -> VT | None:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self._locate_node(A)
if node is not None:
return node.value
return None
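# Minimal usage sketch (not part of the original file; it assumes the class above is
# bound to the name SkipList, as the test helpers below also assume):
#   sl = SkipList()
#   sl.insert('a' , 1); sl.insert('b' , 2)
#   sl.find('a')    # -> 1
#   sl.delete('a'); sl.find('a')    # -> None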
def A ( ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 12 )
skip_list.insert('Key3' , 41 )
skip_list.insert('Key4' , -19 )
_UpperCAmelCase = skip_list.head
_UpperCAmelCase = {}
while node.level != 0:
_UpperCAmelCase = node.forward[0]
_UpperCAmelCase = node.value
assert len(_UpperCAmelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert('Key1' , 10 )
skip_list.insert('Key1' , 12 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 10 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 10 )
_UpperCAmelCase = skip_list.head
_UpperCAmelCase = {}
while node.level != 0:
_UpperCAmelCase = node.forward[0]
_UpperCAmelCase = node.value
if len(_UpperCAmelCase ) != 4:
print()
assert len(_UpperCAmelCase ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = SkipList()
assert skip_list.find('Some key' ) is None
def A ( ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def A ( ) -> int:
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def A ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def A ( ) -> int:
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 142 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
def traverse_keys(node : Optional[Any] ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def A ( ) -> List[Any]:
'''simple docstring'''
def is_sorted(lst : Union[str, Any] ):
return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
_UpperCAmelCase = SkipList()
for i in range(10 ):
skip_list.insert(i , i )
assert is_sorted(list(skip_list ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(skip_list ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(skip_list ) )
def A ( ) -> int:
'''simple docstring'''
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 718
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = filter(lambda p : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase__ = logging.getLogger(__name__)
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
if metric == "rouge2":
_UpperCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_UpperCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_UpperCAmelCase = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_UpperCAmelCase = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
' function.' )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
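# Illustration: with metric="rouge2" the callback above monitors "val_rouge2" in "max"
# mode and keeps only the single best checkpoint, named from the filename template,
# e.g. "0.4123-1000" (value and step count are illustrative).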
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
return EarlyStopping(
monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 639
| 0
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
| 639
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''image_processor''', '''tokenizer''']
UpperCamelCase = '''BlipImageProcessor'''
UpperCamelCase = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : str , A : str , A : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = False
super().__init__(A , A)
_UpperCAmelCase = self.image_processor
def __call__( self : Union[str, Any] , A : ImageInput = None , A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , A : bool = True , A : Union[bool, str, PaddingStrategy] = False , A : Union[bool, str, TruncationStrategy] = None , A : Optional[int] = None , A : int = 0 , A : Optional[int] = None , A : Optional[bool] = None , A : bool = False , A : bool = False , A : bool = False , A : bool = False , A : bool = False , A : bool = True , A : Optional[Union[str, TensorType]] = None , **A : int , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None:
_UpperCAmelCase = self.tokenizer
_UpperCAmelCase = self.tokenizer(
text=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_token_type_ids=A , return_length=A , verbose=A , return_tensors=A , **A , )
return text_encoding
# add pixel_values
_UpperCAmelCase = self.image_processor(A , return_tensors=A)
if text is not None:
_UpperCAmelCase = self.tokenizer(
text=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_token_type_ids=A , return_length=A , verbose=A , return_tensors=A , **A , )
else:
_UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(A)
return encoding_image_processor
def _lowerCamelCase ( self : Optional[int] , *A : Union[str, Any] , **A : Optional[Any]) -> List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*A , **A)
def _lowerCamelCase ( self : str , *A : str , **A : Tuple) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.decode(*A , **A)
@property
def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
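# Usage sketch (hypothetical checkpoint; the processor class above has been renamed by
# the style transform, so BlipProcessor stands in for it):
#   processor = BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base')
#   enc = processor(images=image, text='a photo of a cat', return_tensors='pt')
#   # -> pixel_values plus input_ids/attention_mask from the wrapped tokenizer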
| 720
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
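# Example invocation (hypothetical script name; flags match the parser defined above):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --device cpu --max_length 5 --num_beams 4 --output_file_path BART.onnx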
| 639
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''funnel'''
UpperCamelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : Any , A : Any=3_05_22 , A : Tuple=[4, 4, 4] , A : Tuple=None , A : Union[str, Any]=2 , A : int=7_68 , A : Union[str, Any]=12 , A : Any=64 , A : List[str]=30_72 , A : List[str]="gelu_new" , A : List[str]=0.1 , A : Union[str, Any]=0.1 , A : str=0.0 , A : Union[str, Any]=0.1 , A : Tuple=None , A : List[str]=1E-9 , A : Union[str, Any]="mean" , A : List[str]="relative_shift" , A : Dict=True , A : List[str]=True , A : Dict=True , **A : Tuple , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = block_sizes
_UpperCAmelCase = [1] * len(A) if block_repeats is None else block_repeats
assert len(block_sizes) == len(
self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."
_UpperCAmelCase = num_decoder_layers
_UpperCAmelCase = d_model
_UpperCAmelCase = n_head
_UpperCAmelCase = d_head
_UpperCAmelCase = d_inner
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = initializer_range
_UpperCAmelCase = initializer_std
_UpperCAmelCase = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
_UpperCAmelCase = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
_UpperCAmelCase = attention_type
_UpperCAmelCase = separate_cls
_UpperCAmelCase = truncate_seq
_UpperCAmelCase = pool_q_only
super().__init__(**A)
@property
def _lowerCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
return sum(self.block_sizes)
@num_hidden_layers.setter
def _lowerCamelCase ( self : Optional[int] , A : Dict) -> Dict:
"""simple docstring"""
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.')
@property
def _lowerCamelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
return len(self.block_sizes)
@num_blocks.setter
def _lowerCamelCase ( self : Union[str, Any] , A : int) -> List[Any]:
"""simple docstring"""
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.')
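# Note: with the default block_sizes=[4, 4, 4] above, the num_hidden_layers property
# returns sum(block_sizes) == 12 and the num_blocks property returns 3; both are
# read-only, and their setters raise NotImplementedError as shown.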
| 721
|
def A ( _UpperCAmelCase : list ) -> list:
'''simple docstring'''
if len(_UpperCAmelCase ) <= 1:
return lst
_UpperCAmelCase = 1
while i < len(_UpperCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
lst[i - 1] , lst[i] = lst[i], lst[i - 1]
i -= 1
if i == 0:
_UpperCAmelCase = 1
return lst
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
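# Quick illustration (assuming the sorter above is bound to the name gnome_sort, as the
# __main__ block expects):
#   gnome_sort([34, 2, 10, -9])  ->  [-9, 2, 10, 34]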
| 639
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''roformer'''
def __init__( self : Dict , A : int=5_00_00 , A : Optional[int]=None , A : Optional[int]=7_68 , A : int=12 , A : Union[str, Any]=12 , A : List[Any]=30_72 , A : str="gelu" , A : List[str]=0.1 , A : str=0.1 , A : Optional[Any]=15_36 , A : Tuple=2 , A : int=0.0_2 , A : List[Any]=1E-12 , A : Dict=0 , A : Tuple=False , A : List[Any]=True , **A : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=A , **A)
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size if embedding_size is None else embedding_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = rotary_value
_UpperCAmelCase = use_cache
class __lowerCAmelCase ( A ):
@property
def _lowerCamelCase ( self : Union[str, Any]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
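# Sketch (illustrative, not part of the original file) of the ONNX input spec the
# property above yields for any non-multiple-choice task:
#   OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#                ('attention_mask', {0: 'batch', 1: 'sequence'}),
#                ('token_type_ids', {0: 'batch', 1: 'sequence'})])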
| 700
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple="pt" ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=_UpperCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=None , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(_UpperCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
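# Illustration (hypothetical values) of the trimming above with pad_token_id = 0:
#   input_ids = [[5, 6, 0], [7, 0, 0]] keeps columns 0-1 (column 2 is all padding),
#   giving [[5, 6], [7, 0]]; any attention_mask is sliced with the same column mask.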
class Seq2SeqDataset(Dataset):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ) -> None:
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '.source')
        self.tgt_file = Path(data_dir).joinpath(type_path + '.target')
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__( self) -> int:
        return len(self.src_lens)
    def __getitem__( self , index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file) , index).rstrip('\n')
        tgt_line = linecache.getline(str(self.tgt_file) , index).rstrip('\n')
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , 'right')
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , 'right')
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file) -> List[int]:
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn( self , batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['input_ids'] for x in batch])
        masks = torch.stack([x['attention_mask'] for x in batch])
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks)
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    '''Save git information to folder_path/git_log.json.'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , 'git_log.json'))
def save_json(content , path , indent=4 , **json_dump_kwargs):
    with open(path , 'w') as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
        'hostname': str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable , x: Iterable) -> List:
    '''list(map(f, x))'''
    return list(map(f , x))
def pickle_save(obj , path):
    '''Pickle obj to the given path.'''
    with open(path , 'wb') as f:
        return pickle.dump(obj , f)
def normalize_answer(s: str) -> str:
    '''Lower text and remove punctuation, articles and extra whitespace.'''
    def remove_articles(text):
        return re.sub(R'\b(a|an|the)\b' , ' ' , text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
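# Example (illustrative): normalize_answer("The Cat, sat!") -> "cat sat"
# (lowercased, punctuation stripped, the article "the" removed, and the
# leftover double space collapsed by white_space_fix).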
def f1_score(prediction: str , ground_truth: str):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
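# Worked example (illustrative): f1_score("the cat sat", "cat sat down").
# Normalization drops "the", so the token multisets are {cat, sat} and
# {cat, sat, down}: num_same = 2, precision = 2/2 = 1.0, recall = 2/3, and
# F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.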
def exact_match_score(prediction , ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str] , reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns , reference_lns):
        em += exact_match_score(hypo , pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith('rag')
def set_extra_model_params(extra_params , hparams , config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None):
            if not hasattr(config , p) and not hasattr(config , equivalent_param[p]):
                logger.info('config doesn\'t have a `{}` attribute'.format(p))
                delattr(hparams , p)
                continue
            set_p = p if hasattr(config , p) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p))
            delattr(hparams , p)
    return hparams, config
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    '''Scrape the current price for `symbol` from Yahoo Finance.'''
    url = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text , 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
def add(first: int , second: int) -> int:
    '''Add two integers using bitwise operations (carry = AND, sum = XOR).'''
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
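# Worked example: add(5, 3). Carries are computed with AND and re-added
# shifted left until none remain:
#   101 & 011 = 001, 101 ^ 011 = 110, carry -> 010
#   110 & 010 = 010, 110 ^ 010 = 100, carry -> 100
#   100 & 100 = 100, 100 ^ 100 = 000, carry -> 1000
#   000 & 1000 = 0, so first becomes 000 ^ 1000 = 1000 = 8.
# (Sketch assumes non-negative inputs; with Python's unbounded ints some
# negative pairs, e.g. add(1, -1), would never terminate.)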
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    '''Return the set of adjacent symbol pairs in a word (a tuple of symbols).'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
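# Example (illustrative): get_pairs(("l", "o", "w", "</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "</w>")} -- the candidate bigrams that the
# BPE merge loop below ranks against self.bpe_ranks.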
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ) -> None:
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='utf-8') as merges_handle:
                merges = merges_handle.read().split('\n')[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder)
    def bpe(self , token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first , i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES , '')
        word = word.replace(' ' , BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
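    # The loop above repeatedly applies the best-ranked known merge. For an
    # illustrative merge table that ranks ("l", "o") before ("lo", "w</w>"):
    #   ("l", "o", "w</w>") -> ("lo", "w</w>") -> ("low</w>",)
    # after which the trailing "</w>" marker is stripped. (Real ranks come
    # from the merges_file passed at instantiation.)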
    def _tokenize(self , text):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.')
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id(self , token: str) -> int:
        return self.encoder.get(token , self.encoder.get(self.unk_token))
    def _convert_id_to_token(self , index: int) -> str:
        result = self.decoder.get(index , self.unk_token)
        return result
    def convert_tokens_to_string(self , tokens: List[str]) -> str:
        string = ' '.join(tokens)
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merges_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file , 'w' , encoding='utf-8') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False) + '\n')
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , 'w' , encoding='utf-8') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merges_file)
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    '''Find a root of the given function (a sympy expression string).'''
    x = symbols(variable)
    func = lambdify(x , function)
    diff_function = lambdify(x , diff(function , x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError('Could not find root' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Tuple , A : bool = True , A : Dict[str, int] = None , A : float = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 3_84}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
# Default value set here for backwards compatibility where the value in config is None
_UpperCAmelCase = crop_pct if crop_pct is not None else 2_24 / 2_56
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : str , A : np.ndarray , A : Dict[str, int] , A : float , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : Any , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
_UpperCAmelCase = size['shortest_edge']
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_UpperCAmelCase = int(shortest_edge / crop_pct)
_UpperCAmelCase = get_resize_output_image_size(A , size=A , default_to_square=A)
_UpperCAmelCase = resize(image=A , size=A , resample=A , data_format=A , **A)
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=A , size=(shortest_edge, shortest_edge) , data_format=A , **A)
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
A , size=(shortest_edge, shortest_edge) , resample=A , data_format=A , **A)
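    # Numeric sketch: with size = {"shortest_edge": 224} and the default
    # crop_pct of 224/256, the shortest edge is first resized to
    # int(224 / 0.875) = 256 and the result center-cropped to 224 x 224; from
    # 384 upwards the image is warped straight to (shortest_edge, shortest_edge).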
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Union[int, float] , A : Optional[Union[str, ChannelDimension]] = None , **A : int , ) -> Optional[Any]:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : Optional[Any] , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Dict , A : ImageInput , A : bool = None , A : Dict[str, int] = None , A : float = None , A : PILImageResampling = None , A : bool = None , A : float = None , A : bool = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : ChannelDimension = ChannelDimension.FIRST , **A : Optional[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = crop_pct if crop_pct is not None else self.crop_pct
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , crop_pct=A , resample=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
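        # Shape sketch: for logits of shape (batch, num_labels, h, w) and
        # target_sizes like [(512, 683)], each entry of the returned list is a
        # (512, 683) tensor of per-pixel argmax label ids; without target_sizes
        # each entry is the (h, w) argmax map at the logits' native resolution.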
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , A : int , A : Tuple=13 , A : List[str]=32 , A : Union[str, Any]=2 , A : Union[str, Any]=3 , A : Union[str, Any]=16 , A : List[Any]=[1, 2, 1] , A : Union[str, Any]=[2, 2, 4] , A : Optional[Any]=2 , A : List[Any]=2.0 , A : Tuple=True , A : Any=0.0 , A : List[Any]=0.0 , A : Tuple=0.1 , A : str="gelu" , A : int=False , A : List[str]=True , A : Optional[int]=0.0_2 , A : Optional[Any]=1E-5 , A : Optional[Any]=True , A : Dict=None , A : Dict=True , A : Any=10 , A : str=8 , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
def _lowerCamelCase ( self : int) -> str:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : List[Any] , A : List[Any] , A : int , A : List[str]) -> int:
"""simple docstring"""
_UpperCAmelCase = SwinvaModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths) - 1))
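        # With the tester defaults (image_size=32, patch_size=2, embed_dim=16,
        # depths=[1, 2, 1]): (32 // 2) ** 2 = 256 patches, downsampled by
        # 4 ** (3 - 1) = 16 to an expected sequence length of 16, and an
        # expected hidden dimension of 16 * 2 ** 2 = 64.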
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self : int , A : Optional[Any] , A : Any , A : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = SwinvaForMaskedImageModeling(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = SwinvaForMaskedImageModeling(A)
model.to(A)
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : Optional[Any] , A : List[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = SwinvaForImageClassification(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( A , A , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : Any) -> int:
"""simple docstring"""
_UpperCAmelCase = SwinvaModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , embed_dim=37)
def _lowerCamelCase ( self : Any) -> List[str]:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
def _lowerCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds')
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear))
def _lowerCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
_UpperCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A)
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.attentions
_UpperCAmelCase = len(self.model_tester.depths)
self.assertEqual(len(A) , A)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = config.window_size**2
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(A) , A)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_UpperCAmelCase = len(A)
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
if hasattr(self.model_tester , 'num_hidden_states_types'):
_UpperCAmelCase = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_UpperCAmelCase = 2
self.assertEqual(out_len + added_hidden_states , len(A))
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(A) , A)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowerCamelCase ( self : str , A : str , A : Union[str, Any] , A : Optional[Any] , A : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(A) , A)
# Swinv2 has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
_UpperCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(A) , A)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reshaped_hidden_states[0].shape
_UpperCAmelCase = (
reshaped_hidden_states[0].view(A , A , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(A , A , A , A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(A , A , A , A)
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(A , A , A , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(A , A , A , (padded_height, padded_width))
def _lowerCamelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A)
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A)
@slow
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = SwinvaModel.from_pretrained(A)
self.assertIsNotNone(A)
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(A)
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=A)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
A)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_processor(images=A , return_tensors='pt').to(A)
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A)
# verify the logits
_UpperCAmelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6]).to(A)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4))
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        '''Zero capacity (or zero-value items) should yield 0.'''
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)
    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 5)
    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 220)
if __name__ == "__main__":
unittest.main()
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path , targets):
    '''Extract warnings from a downloaded artifact (in .zip format).'''
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line , bytes):
                line = line.decode('UTF-8' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' ' ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '\n'.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(F": {x}: " in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
    return selected_warnings
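# Matching sketch: a buffered warning block such as
#   "src/foo.py:12: DeprecationWarning: bar() is deprecated"
# is kept when targets contains "DeprecationWarning", because parse_line tests
# for the substring F": {x}: " inside the joined block. (File name and message
# here are illustrative only.)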
def extract_warnings(artifact_dir , targets):
    '''Extract warnings from all artifact files.'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('.zip' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(',' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
import qiskit
def single_qubit_measure(qubits: int , classical_bits: int) -> qiskit.result.counts.Counts:
    '''Flip qubits 0 and 1 with X gates and measure them on the Aer simulator.'''
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
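# Both qubits are flipped by the X gates before measurement, so every shot
# should collapse to |11>; the counts printed below are expected to be
# {'11': 1000} on the noiseless simulator.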
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename , 'r' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
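# Input format sketch (one label per non-empty line; ids come from line numbers):
#   down
#   up
#   left
# would produce {0: "down", 1: "up", 2: "left"}, which the conversion code
# below uses as the sequence-classification id2label mapping. (Label names are
# illustrative only.)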
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
for attribute in key.split('.' ):
_UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_UpperCAmelCase ):
_UpperCAmelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
_UpperCAmelCase = 'param'
if weight_type is not None and weight_type != "param":
_UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
_UpperCAmelCase = hf_pointer
for attribute in hf_param_name.split('.' ):
_UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = shape_pointer.shape
# let's reduce dimension
_UpperCAmelCase = value[0]
else:
_UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
_UpperCAmelCase = value
elif weight_type == "weight_g":
_UpperCAmelCase = value
elif weight_type == "weight_v":
_UpperCAmelCase = value
elif weight_type == "bias":
_UpperCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
_UpperCAmelCase = getattr(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_UpperCAmelCase ):
_UpperCAmelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
_UpperCAmelCase = 'param'
if weight_type is not None and weight_type != "param":
_UpperCAmelCase = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_UpperCAmelCase = '.'.join([key, hf_param_name] )
else:
_UpperCAmelCase = key
_UpperCAmelCase = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Union[str, Any]=None ) -> str:
'''simple docstring'''
_UpperCAmelCase = False
for key, mapped_key in MAPPING.items():
_UpperCAmelCase = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_UpperCAmelCase = True
if "*" in mapped_key:
_UpperCAmelCase = name.split(_UpperCAmelCase )[0].split('.' )[-2]
_UpperCAmelCase = mapped_key.replace('*' , _UpperCAmelCase )
if "weight_g" in name:
_UpperCAmelCase = 'weight_g'
elif "weight_v" in name:
_UpperCAmelCase = 'weight_v'
elif "bias" in name:
_UpperCAmelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCAmelCase = 'weight'
else:
_UpperCAmelCase = None
if hf_dict is not None:
rename_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return is_used
return is_used
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = fairseq_model.state_dict()
_UpperCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
_UpperCAmelCase = True
else:
_UpperCAmelCase = load_wavaveca_layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if not is_used:
unused_weights.append(_UpperCAmelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = full_name.split('conv_layers.' )[-1]
_UpperCAmelCase = name.split('.' )
_UpperCAmelCase = int(items[0] )
_UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCAmelCase )
@torch.no_grad()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Union[str, Any]=False ) -> List[str]:
'''simple docstring'''
if config_path is not None:
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_UpperCAmelCase )
else:
_UpperCAmelCase = WavaVecaConfig()
if is_seq_class:
_UpperCAmelCase = read_txt_into_dict(_UpperCAmelCase )
_UpperCAmelCase = idalabel
_UpperCAmelCase = WavaVecaForSequenceClassification(_UpperCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
feature_extractor.save_pretrained(_UpperCAmelCase )
elif is_finetuned:
if dict_path:
_UpperCAmelCase = Dictionary.load(_UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCAmelCase = target_dict.pad_index
_UpperCAmelCase = target_dict.bos_index
_UpperCAmelCase = target_dict.eos_index
_UpperCAmelCase = len(target_dict.symbols )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'vocab.json' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCAmelCase ) )
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_UpperCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_UpperCAmelCase = 0
_UpperCAmelCase = 1
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = WavaVecaCTCTokenizer(
_UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCAmelCase , )
_UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False
_UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
_UpperCAmelCase = WavaVecaProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
_UpperCAmelCase = WavaVecaForCTC(_UpperCAmelCase )
else:
_UpperCAmelCase = WavaVecaForPreTraining(_UpperCAmelCase )
if is_finetuned or is_seq_class:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_UpperCAmelCase = argparse.Namespace(task='audio_pretraining' )
_UpperCAmelCase = fairseq.tasks.setup_task(_UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_UpperCAmelCase )
_UpperCAmelCase = model[0].eval()
recursively_load_weights(_UpperCAmelCase , _UpperCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
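    # Hypothetical invocation (script name and paths are placeholders, not from this file;
    # the flags match the argparse definitions above):
    #     python convert_wav2vec2_checkpoint.py \
    #         --checkpoint_path ./wav2vec_small.pt \
    #         --dict_path ./dict.ltr.txt \
    #         --pytorch_dump_folder_path ./wav2vec2-base-converted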
| 706
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
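    # Hypothetical invocation (script name and paths are placeholders, not from this file;
    # the flags match the argparse definitions above):
    #     python convert_t5_checkpoint.py \
    #         --tf_checkpoint_path ./t5/model.ckpt \
    #         --config_file ./t5/config.json \
    #         --pytorch_dump_path ./t5-pytorch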
| 639
| 0
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCAmelCase :
def __init__( self : List[str] , A : Any , A : Optional[Any]=99 , A : Tuple=13 , A : Optional[Any]=7 , A : Tuple=9 , A : List[Any]=True , A : Union[str, Any]=True , A : Optional[Any]=False , A : Dict=32 , A : Tuple=5 , A : List[Any]=4 , A : Union[str, Any]=37 , A : Dict=8 , A : List[Any]=0.1 , A : int=0.0_0_2 , A : str=1 , A : Union[str, Any]=0 , A : Dict=0 , A : Union[str, Any]=None , A : Optional[Any]=None , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = encoder_seq_length
_UpperCAmelCase = decoder_seq_length
# For common tests
_UpperCAmelCase = self.decoder_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = d_ff
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = decoder_start_token_id
_UpperCAmelCase = None
_UpperCAmelCase = decoder_layers
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
return TaConfig.from_pretrained('google/umt5-base')
def _lowerCamelCase ( self : Tuple , A : int , A : Union[str, Any] , A : List[str] , A : Any=None , A : List[Any]=None , A : List[Any]=None , A : int=None , A : Optional[Any]=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
_UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
_UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=A)
if decoder_head_mask is None:
_UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=A)
if cross_attn_head_mask is None:
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=A)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1)
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1)
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = config.num_attention_heads
_UpperCAmelCase = self.prepare_inputs_dict(A , A , A)
return config, input_dict
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCamelCase ( self : str , A : List[Any] , A : Any , A : List[str] , A : str , A : Tuple , A : str , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = UMTaModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(
input_ids=A , decoder_input_ids=A , attention_mask=A , decoder_attention_mask=A , )
_UpperCAmelCase = model(input_ids=A , decoder_input_ids=A)
_UpperCAmelCase = result.last_hidden_state
_UpperCAmelCase = result.past_key_values
_UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(A) , config.num_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0]) , 4)
def _lowerCamelCase ( self : Optional[int] , A : Union[str, Any] , A : Tuple , A : Optional[Any] , A : List[str] , A : str , A : List[str] , ) -> Any:
"""simple docstring"""
_UpperCAmelCase = UMTaModel(config=A).get_decoder().to(A).eval()
# first forward pass
_UpperCAmelCase = model(A , use_cache=A)
_UpperCAmelCase = model(A)
_UpperCAmelCase = model(A , use_cache=A)
self.parent.assertTrue(len(A) == len(A))
self.parent.assertTrue(len(A) == len(A) + 1)
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
        # create a hypothetical next token and extend it to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size)
        # append the next tokens to input_ids
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1)
_UpperCAmelCase = model(A)['last_hidden_state']
_UpperCAmelCase = model(A , past_key_values=A)['last_hidden_state']
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1]).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3))
def _lowerCamelCase ( self : Optional[Any] , A : str , A : Optional[Any] , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = UMTaModel(config=A).to(A).half().eval()
_UpperCAmelCase = model(**A)['last_hidden_state']
self.parent.assertFalse(torch.isnan(A).any().item())
@require_torch
class __lowerCAmelCase ( A , A , A , unittest.TestCase ):
UpperCamelCase = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCamelCase = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCamelCase = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCamelCase = [0.8, 0.9]
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = UMTaModelTester(self)
@unittest.skip('Test has a segmentation fault on torch 1.8.0')
def _lowerCamelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0]).to(A)
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
A , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=A , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision')
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*A)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(A).eval()
model.to(A)
_UpperCAmelCase = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=A),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=A),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=A),
}
for attn_name, (name, mask) in zip(A , head_masking.items()):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=A)
_UpperCAmelCase = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=A , return_dict_in_generate=A , **A , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0)
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.')
def _lowerCamelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged')
def _lowerCamelCase ( self : str) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=A).to(A)
_UpperCAmelCase = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=A , legacy=A)
_UpperCAmelCase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_UpperCAmelCase = tokenizer(A , return_tensors='pt' , padding=A).input_ids
# fmt: off
_UpperCAmelCase = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
])
# fmt: on
torch.testing.assert_allclose(A , A)
_UpperCAmelCase = model.generate(input_ids.to(A))
_UpperCAmelCase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_UpperCAmelCase = tokenizer.batch_decode(A)
self.assertEqual(A , A)
| 707
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639
| 0
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
UpperCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def A ( _UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_UpperCAmelCase = model_type_to_module_name(_UpperCAmelCase )
_UpperCAmelCase = importlib.import_module(F".{module_name}" , 'transformers.models' )
try:
return getattr(_UpperCAmelCase , _UpperCAmelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_UpperCAmelCase , '__name__' , _UpperCAmelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_UpperCAmelCase = importlib.import_module('transformers' )
if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
return getattr(_UpperCAmelCase , _UpperCAmelCase )
return None
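# Illustrative lookup (a sketch; the class name is taken from the mapping above):
# feature_extractor_class_from_name('Wav2Vec2FeatureExtractor') imports
# transformers.models.wav2vec2 and returns the class, or None if it cannot be resolved.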
def A ( _UpperCAmelCase : Union[str, os.PathLike] , _UpperCAmelCase : Optional[Union[str, os.PathLike]] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[Dict[str, str]] = None , _UpperCAmelCase : Optional[Union[bool, str]] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , **_UpperCAmelCase : Tuple , ) -> str:
'''simple docstring'''
_UpperCAmelCase = get_file_from_repo(
_UpperCAmelCase , _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , resume_download=_UpperCAmelCase , proxies=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(_UpperCAmelCase , encoding='utf-8' ) as reader:
return json.load(_UpperCAmelCase )
class __lowerCAmelCase :
def __init__( self : Tuple) -> Tuple:
"""simple docstring"""
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
@replace_list_option_in_docstrings(A)
def _lowerCamelCase ( cls : Tuple , A : Union[str, Any] , **A : Any) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = kwargs.pop('config' , A)
_UpperCAmelCase = kwargs.pop('trust_remote_code' , A)
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = FeatureExtractionMixin.get_feature_extractor_dict(A , **A)
_UpperCAmelCase = config_dict.get('feature_extractor_type' , A)
_UpperCAmelCase = None
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {}):
_UpperCAmelCase = config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(A , A):
_UpperCAmelCase = AutoConfig.from_pretrained(A , **A)
            # It could be in `config.feature_extractor_type`
_UpperCAmelCase = getattr(A , 'feature_extractor_type' , A)
if hasattr(A , 'auto_map') and "AutoFeatureExtractor" in config.auto_map:
_UpperCAmelCase = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
_UpperCAmelCase = feature_extractor_class_from_name(A)
_UpperCAmelCase = feature_extractor_auto_map is not None
_UpperCAmelCase = feature_extractor_class is not None or type(A) in FEATURE_EXTRACTOR_MAPPING
_UpperCAmelCase = resolve_trust_remote_code(
A , A , A , A)
if has_remote_code and trust_remote_code:
_UpperCAmelCase = get_class_from_dynamic_module(
A , A , **A)
_UpperCAmelCase = kwargs.pop('code_revision' , A)
if os.path.isdir(A):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(A , **A)
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(A , **A)
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(A) in FEATURE_EXTRACTOR_MAPPING:
_UpperCAmelCase = FEATURE_EXTRACTOR_MAPPING[type(A)]
return feature_extractor_class.from_dict(A , **A)
raise ValueError(
F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
@staticmethod
def _lowerCamelCase ( A : int , A : Any) -> Union[str, Any]:
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(A , A)
| 708
|
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(_UpperCAmelCase ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
_UpperCAmelCase = ''
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_UpperCAmelCase = [
word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
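# Worked example: "SKY" -> 19 + 11 + 25 = 55 = 0.5 * 10 * 11 (the 10th triangular number),
# so "SKY" is counted by the comprehension above.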
if __name__ == "__main__":
print(solution())
| 639
| 0
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
UpperCAmelCase__ = "src/transformers"
UpperCAmelCase__ = "docs/source/en/tasks"
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
_UpperCAmelCase = f.readlines()
# Find the start prompt.
_UpperCAmelCase = 0
while not lines[start_index].startswith(_UpperCAmelCase ):
start_index += 1
start_index += 1
_UpperCAmelCase = start_index
while not lines[end_index].startswith(_UpperCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
UpperCAmelCase__ = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
UpperCAmelCase__ = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def A ( _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = TASK_GUIDE_TO_MODELS[task_guide]
_UpperCAmelCase = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_UpperCAmelCase , set() )
_UpperCAmelCase = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str]=False ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = _find_text_in_file(
filename=os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
_UpperCAmelCase = get_model_list_for_task(_UpperCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
' to fix this.' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCAmelCase__ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
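# Typical usage, per the comment at the top of this file:
#     python utils/check_task_guides.py                      # check only
#     python utils/check_task_guides.py --fix_and_overwrite  # fix in place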
| 709
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
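# Worked example: for a=25 ("11001") and b=32 ("100000"), both strings are zero-filled
# to "011001" and "100000"; XOR-ing bit by bit gives "111001", so the function
# returns "0b111001".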
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
| 0
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
UpperCAmelCase__ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
UpperCAmelCase__ = "main"
# Default branch name
UpperCAmelCase__ = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
UpperCAmelCase__ = "aaaaaaa"
# This commit does not exist, so we should 404.
UpperCAmelCase__ = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
UpperCAmelCase__ = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def A ( ) -> List[Any]:
'''simple docstring'''
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def A ( ) -> Any:
'''simple docstring'''
print('Bonjour!' )
yield
print('Au revoir!' )
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers') is not None
class __lowerCAmelCase ( unittest.TestCase ):
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
def _lowerCamelCase ( self : Optional[Any] , A : Union[str, Any]) -> List[str]:
"""simple docstring"""
with ContextManagers([]):
print('Transformers are awesome!')
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n')
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
def _lowerCamelCase ( self : int , A : int) -> Optional[Any]:
"""simple docstring"""
with ContextManagers([context_en()]):
print('Transformers are awesome!')
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n')
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO)
def _lowerCamelCase ( self : str , A : Tuple) -> Union[str, Any]:
"""simple docstring"""
with ContextManagers([context_fr(), context_en()]):
print('Transformers are awesome!')
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')
@require_torch
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
self.assertEqual(find_labels(A) , ['labels'])
self.assertEqual(find_labels(A) , ['labels', 'next_sentence_label'])
self.assertEqual(find_labels(A) , ['start_positions', 'end_positions'])
class __lowerCAmelCase ( A ):
pass
self.assertEqual(find_labels(A) , ['labels'])
@require_tf
def _lowerCamelCase ( self : Dict) -> Any:
"""simple docstring"""
self.assertEqual(find_labels(A) , ['labels'])
self.assertEqual(find_labels(A) , ['labels', 'next_sentence_label'])
self.assertEqual(find_labels(A) , ['start_positions', 'end_positions'])
class __lowerCAmelCase ( A ):
pass
self.assertEqual(find_labels(A) , ['labels'])
@require_flax
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
self.assertEqual(find_labels(A) , [])
self.assertEqual(find_labels(A) , [])
self.assertEqual(find_labels(A) , [])
class __lowerCAmelCase ( A ):
pass
self.assertEqual(find_labels(A) , [])
| 710
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
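# The counter-based check above relies on the fact that a string can be rearranged
# into a palindrome iff at most one character occurs an odd number of times
# (that character would sit in the middle of an odd-length palindrome).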
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
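# Worked example: "Momo" -> "momo" -> counts {'m': 2, 'o': 2}, zero odd counts -> True;
# "Mother" -> every letter occurs exactly once -> six odd counts -> False.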
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
| 0
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
metadata={'''help''': '''The output directory where the model will be written.'''} , )
UpperCamelCase = field(
metadata={
'''help''': (
'''The encoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train an encoder model from scratch.'''
)
} , )
UpperCamelCase = field(
metadata={
'''help''': (
'''The decoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train a decoder model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} )
def A ( ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = HfArgumentParser((ModelArguments,) )
((_UpperCAmelCase ) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=_UpperCAmelCase , decoder_config=_UpperCAmelCase , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
_UpperCAmelCase = decoder_config.decoder_start_token_id
_UpperCAmelCase = decoder_config.pad_token_id
if decoder_start_token_id is None:
_UpperCAmelCase = decoder_config.bos_token_id
if pad_token_id is None:
_UpperCAmelCase = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
_UpperCAmelCase = decoder_config.eos_token_id
_UpperCAmelCase = decoder_start_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
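# Hypothetical invocation (script name and model names are placeholders, not from this
# file; the flags mirror the model_args fields referenced in main()):
#     python create_vision_encoder_decoder.py \
#         --output_dir ./vit-gpt2 \
#         --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#         --decoder_model_name_or_path gpt2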
if __name__ == "__main__":
main()
| 711
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
            '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
        _UpperCAmelCase = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
_UpperCAmelCase = refs
return Dataset.from_dict(_UpperCAmelCase )
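# Illustrative ref file format (an assumption for this sketch): one JSON list per line,
# e.g. "[2, 5]", marking the character positions that continue a whole word, so the
# whole-word-mask collator can mask entire Chinese words together.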
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
        _UpperCAmelCase = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the Chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to prevent the Trainer from removing them
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 639
| 0
|
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str ) -> str:
    """Put each sentence on its own line so rougeLsum matches published rougeL scores."""
    x = re.sub('<n>' , '' , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 712
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token='<s>' , eos_token='</s>' , unk_token='<unk>' , sep_token='<sep>' , pad_token='<pad>' , cls_token='<cls>' , mask_token='<mask>' , additional_special_tokens=['<eop>', '<eod>'] , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        """An XLNet pair has the format ``A <sep> B <sep> <cls>``; a single sequence ``X <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Segment ids: 0 for the first sequence, 1 for the second, and 2 for the trailing <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
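    # e.g. token_ids_0 = [10, 11] -> [0, 0, 0, 2]: segment 0 for the two tokens plus <sep>,
    # then segment 2 for the trailing <cls>.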
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocabulary file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
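# A self-contained sketch of the layout produced by build_inputs_with_special_tokens above
# (plain string stand-ins for the real token ids; purely illustrative):
if __name__ == "__main__":
    sep, cls = ['<sep>'], ['<cls>']
    tokens_a, tokens_b = ['▁hello', '▁world'], ['▁again']
    assert tokens_a + sep + cls == ['▁hello', '▁world', '<sep>', '<cls>']
    assert tokens_a + sep + tokens_b + sep + cls == ['▁hello', '▁world', '<sep>', '▁again', '<sep>', '<cls>']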
| 639
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments for which model/config/tokenizer to fine-tune, or to train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)} , )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )

    def __post_init__(self ):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class DataTrainingArguments:
    """Arguments for what data the model is trained and evaluated on."""

    dataset_name: Optional[str] = field(
        default=None , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_file: Optional[str] = field(default=None , metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
    validation_ref_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    validation_split_percentage: Optional[int] = field(
        default=5 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    mlm_probability: float = field(
        default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )

    def __post_init__(self ):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset , ref_file ):
    """Attach whole-word-masking reference indices from `ref_file` to `dataset`."""
    with open(ref_file , 'r' , encoding='utf-8' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict )
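# Hedged sketch of the ref-file format consumed above (values illustrative): one JSON list
# per line, each listing the character positions that continue a whole word, e.g. a file with
#   [2, 3]
#   [1, 4, 5]
# yields refs == [[2, 3], [1, 4, 5]] after the json.loads loop.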
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
            datasets['train'] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines so they do not become (useless) training examples
        examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['text'] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from dropping those columns
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model() # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir , 'train_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_train_file , 'w' ) as writer:
                logger.info('***** Train results *****' )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(F" {key} = {value}" )
                    writer.write(F"{key} = {value}\n" )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        results['perplexity'] = perplexity
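        # perplexity = exp(mean eval loss); e.g. an eval loss of 2.0 gives perplexity ≈ 7.39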
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in sorted(results.items() ):
                    logger.info(F" {key} = {value}" )
                    writer.write(F"{key} = {value}\n" )
    return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 0
|
import pprint
import requests
UpperCAmelCase__ = "https://zenquotes.io/api"
def A ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def A ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/random' ).json()
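# Hedged sketch of the payload both endpoints return (field names per the public
# ZenQuotes docs; treat them as an assumption): a JSON list such as
# [{"q": "<quote text>", "a": "<author>", "h": "<HTML-rendered quote>"}]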
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 714
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")
def get_hash(example ):
    """MD5 hash of the whitespace-normalized file content."""
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats(example ):
    """Mean and max line length of the file."""
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example ):
    """Fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques ):
    """Pass only the first occurrence of each hash; the hash is consumed from `uniques`."""
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def is_autogenerated(example , scan_width=5 ):
    """Check the first `scan_width` lines for auto-generation markers."""
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05 ):
    """Flag probable configuration or test files via keywords and keyword counts."""
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords(example ):
    """True if the file uses none of the usual Python constructs."""
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments(example , minimum=4 ):
    """True if the file contains fewer than `minimum` '=' characters."""
    lines = example['content'].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio(example ):
    """Character/token ratio of the file content under the global tokenizer."""
    input_ids = tokenizer(example['content'] , truncation=False )['input_ids']
    ratio = len(example['content'] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess(example ):
    """Chain all preprocessing statistics and heuristics for one example."""
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter(example , uniques , args ):
    """Combined filter driven by the statistics computed in `preprocess`."""
    if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
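# In short: keep an example only if it is the first copy of its hash, not autogenerated,
# within the line-length/alphanumeric/token-ratio bounds, and (when flagged as config/test
# or keyword-free) survives a coin flip that rejects with probability args.filter_proba.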
def compress_file(file_path ):
    """Gzip-compress `file_path` (level 6) and remove the original file."""
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(f"""Size of deduplicated dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 639
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self ):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
    def test_stable_diffusion_2(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
    def test_stable_diffusion_2_non_square(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 715
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key ):
    """Map an original Jukebox parameter name onto the transformers naming scheme."""
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
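# A few concrete renames produced above (illustrative inputs):
# 'vqvae.bottleneck.level_blocks.0.k' -> 'vqvae.bottleneck.level_blocks.0.codebook'
# 'prime_state_ln'                    -> 'encoder.final_layer_norm'
# 'y_emb.lookup_table'                -> 'metadata_embedding.lookup_table'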
def fix_jukebox_keys(state_dict , model_state_dict , key_prefix , mapping ):
    """Rename every key in `state_dict` to the transformers layout, recording the mapping."""
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_encoder_block_resnet = re.compile(
        R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_encoder_block_proj_out = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_conv_out = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_decoder_block_resnet = re.compile(
        R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_proj_in = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_conv_out = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
    re_prior_cond_resnet = re.compile(
        R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_proj_in = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None , pytorch_dump_folder_path=None ):
    """Download the original Jukebox weights, remap their keys and save a transformers checkpoint."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            r = requests.get(F"{PREFIX}{file}" , allow_redirects=True )
            os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=True )
            open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split('/' )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b' ):
                new_dic[k.replace('b' , 'bias' )] = old_dic[k]
            elif k.endswith('.w' ):
                new_dic[k.replace('w' , 'weight' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.' , '.model.' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = 'vqvae' if i == 0 else F"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
        json.dump(mapping , txtfile )
    print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 639
| 0
|
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'scipy']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'scipy'])
@classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'scipy'])
@classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'scipy'])
| 716
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 639
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase ):
    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
            json.dump(image_processor_map , fp)
    def get_tokenizer(self , **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_rust_tokenizer(self , **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs)
    def get_image_processor(self , **kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs)
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self ):
        """A single random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=A , image_processor=A)
processor_slow.save_pretrained(self.tmpdirname)
_UpperCAmelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A)
_UpperCAmelCase = CLIPSegProcessor(tokenizer=A , image_processor=A)
processor_fast.save_pretrained(self.tmpdirname)
_UpperCAmelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , A)
self.assertIsInstance(processor_fast.tokenizer , A)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , A)
self.assertIsInstance(processor_fast.image_processor , A)
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
_UpperCAmelCase = self.get_image_processor(do_normalize=A , padding_value=1.0)
_UpperCAmelCase = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , A)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , A)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=A , image_processor=A)
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(A , return_tensors='np')
_UpperCAmelCase = processor(images=A , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=A , image_processor=A)
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = processor(text=A)
_UpperCAmelCase = tokenizer(A)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=A , image_processor=A)
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=A , images=A)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(A):
processor()
def _lowerCamelCase ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=A , image_processor=A)
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(images=A , visual_prompt=A)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'conditional_pixel_values'])
# test if it raises when no input is passed
with pytest.raises(A):
processor()
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = CLIPSegProcessor(tokenizer=A , image_processor=A)
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(A)
_UpperCAmelCase = tokenizer.batch_decode(A)
self.assertListEqual(A , A)
| 717
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
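# A minimal sketch of the idea behind _LazyModule (illustrative names only, not the
# transformers implementation): a module-level __getattr__ (PEP 562) imports a
# submodule the first time one of its exported names is accessed.
import importlib

_LAZY_STRUCTURE = {"math": ["sqrt", "pi"]}  # submodule -> names it exports

def __getattr__(name):
    for module_name, exported in _LAZY_STRUCTURE.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")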
| 639
| 0
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase__ = logging.getLogger()
def A ( ) -> Any:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('-f' )
_UpperCAmelCase = parser.parse_args()
return args.f
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : Any) -> None:
"""simple docstring"""
_UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(A)
def _lowerCamelCase ( self : List[Any] , A : Optional[int]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , 'run_glue_deebert.py')
with patch.object(A , 'argv' , A):
_UpperCAmelCase = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(A , 0.6_6_6)
@slow
@require_torch_non_multi_gpu
def _lowerCamelCase ( self : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(A)
_UpperCAmelCase = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(A)
_UpperCAmelCase = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(A)
| 718
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad , _UpperCAmelCase.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
UpperCAmelCase__ = logging.getLogger(__name__)
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
if metric == "rouge2":
_UpperCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_UpperCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_UpperCAmelCase = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_UpperCAmelCase = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
' function.' )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
return EarlyStopping(
monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 639
| 0
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any] , A : Any) -> str:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss']):
_UpperCAmelCase = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(A)
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : Tuple) -> int:
"""simple docstring"""
_UpperCAmelCase = 'sgugger/tiny-distilbert-classification'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , torchscript=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision')
def _lowerCamelCase ( self : Any) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , fpaa=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = AutoConfig.from_pretrained(A)
# set architectures equal to `None`
_UpperCAmelCase = None
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A , configs=[config])
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : Tuple) -> int:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision')
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = AutoConfig.from_pretrained(A)
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A , configs=[config])
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tinier_bart'
_UpperCAmelCase = AutoConfig.from_pretrained(A)
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A , configs=[config])
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
_UpperCAmelCase = AutoConfig.from_pretrained(A)
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A , configs=[config])
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tinier_bart'
_UpperCAmelCase = AutoConfig.from_pretrained(A)
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A , configs=[config])
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , 'inf_time.csv') , train_memory_csv_file=os.path.join(A , 'train_mem.csv') , inference_memory_csv_file=os.path.join(A , 'inf_mem.csv') , train_time_csv_file=os.path.join(A , 'train_time.csv') , env_info_csv_file=os.path.join(A , 'env.csv') , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
benchmark.run()
self.assertTrue(Path(os.path.join(A , 'inf_time.csv')).exists())
self.assertTrue(Path(os.path.join(A , 'train_time.csv')).exists())
self.assertTrue(Path(os.path.join(A , 'inf_mem.csv')).exists())
self.assertTrue(Path(os.path.join(A , 'train_mem.csv')).exists())
self.assertTrue(Path(os.path.join(A , 'env.csv')).exists())
def _lowerCamelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(A : List[Any]):
self.assertTrue(hasattr(A , 'sequential'))
self.assertTrue(hasattr(A , 'cumulative'))
self.assertTrue(hasattr(A , 'current'))
self.assertTrue(hasattr(A , 'total'))
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , 'log.txt') , log_print=A , trace_memory_line_by_line=A , multi_process=A , )
_UpperCAmelCase = PyTorchBenchmark(A)
_UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(A , 'log.txt')).exists())
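# A minimal sketch of what the tests above exercise (flag values are chosen for
# illustration): PyTorchBenchmarkArguments selects the models and shapes to measure,
# and PyTorchBenchmark.run() returns timing and memory results. Guarded under
# __main__ because it downloads a (tiny) checkpoint.
if __name__ == "__main__":
    benchmark_args = PyTorchBenchmarkArguments(
        models=['sshleifer/tiny-gpt2'], training=False, inference=True,
        sequence_lengths=[8], batch_sizes=[1], multi_process=False,
    )
    print(PyTorchBenchmark(benchmark_args).run().time_inference_result)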
| 719
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
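# A minimal standalone sketch of the character-level behaviour the tests above rely on
# (the vocab contents mirror the fixture written in setUp; nothing else is assumed):
import tempfile

def _mgpstr_demo() -> None:
    vocab = {tok: i for i, tok in enumerate(['[GO]', '[s]'] + list('0123456789abcdefghijklmnopqrstuvwxyz'))}
    with tempfile.TemporaryDirectory() as tmp:
        with open(os.path.join(tmp, VOCAB_FILES_NAMES['vocab_file']), 'w', encoding='utf-8') as fp:
            json.dump(vocab, fp)
        tokenizer = MgpstrTokenizer.from_pretrained(tmp)
        # MGP-STR tokenizes one character at a time.
        assert tokenizer.tokenize('tester') == list('tester')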
| 639
| 0
|
def A ( num : int ) -> list[int]:
    '''
    Sieve of Eratosthenes: return all primes up to and including num.

    >>> A(10)
    [2, 3, 5, 7]
    '''
    if num <= 0:
        raise ValueError('Input must be a positive integer' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter a positive integer: ").strip())
    print(A(UpperCAmelCase__))
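# A quick sanity check (a sketch, added for illustration): the sieve's output for a
# small bound can be verified against naive trial division.
def _is_prime(k: int) -> bool:
    return k >= 2 and all(k % d for d in range(2, int(k ** 0.5) + 1))
assert A(50) == [k for k in range(2, 51) if _is_prime(k)]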
| 720
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
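# A sketch of a typical invocation (the script filename and all paths are placeholders,
# not values from the source):
#
#   python run_bart_onnx_export.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 \
#       --max_length 5 \
#       --output_file_path ./bart_beam_search.onnx \
#       --device cpu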
| 639
| 0
|
from functools import reduce
UpperCAmelCase__ = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def A ( n : str = UpperCAmelCase__ ) -> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
    print(f"""{A() = }""")
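# A tiny worked example of the same sliding-window product, with a window of 3 instead
# of 13 (added for illustration): for "12345" the windows give 6, 24 and 60, so 60 wins.
assert max(
    int(reduce(lambda x , y : str(int(x ) * int(y ) ) , '12345'[i : i + 3] ) )
    for i in range(len('12345' ) - 2 ) ) == 60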
| 721
|
def A ( lst : list ) -> list:
    '''simple docstring'''
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i] , lst[i - 1] = lst[i - 1], lst[i]
            i -= 1
            if i == 0:
                i = 1
    return lst
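# A quick sanity check of the gnome sort above (a sketch, not part of the original script):
assert A([3, 1, 2]) == [1, 2, 3]
assert A([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert A([]) == []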
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
    UpperCAmelCase__ = [int(item) for item in UpperCAmelCase__.split(",")]
    print(A(UpperCAmelCase__))
| 639
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
lowercase_ : Tuple = XLNetConfig.from_json_file(_UpperCamelCase )
lowercase_ : Union[str, Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowercase_ : Dict = finetuning_task
lowercase_ : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase_ : Any = XLNetForSequenceClassification(_UpperCamelCase )
elif "squad" in finetuning_task:
lowercase_ : Optional[int] = finetuning_task
lowercase_ : Optional[int] = XLNetForQuestionAnswering(_UpperCamelCase )
else:
lowercase_ : Union[str, Any] = XLNetLMHeadModel(_UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
lowercase_ : Optional[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Dict = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F"""Save PyTorch model to {os.path.abspath(_UpperCamelCase )}""" )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"""Save configuration file to {os.path.abspath(_UpperCamelCase )}""" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
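# A sketch of a typical invocation (the script filename and all paths are placeholders):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sts-b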
| 640
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'linear'
__lowerCamelCase: Any = 'cosine'
__lowerCamelCase: Optional[Any] = 'cosine_with_restarts'
__lowerCamelCase: Tuple = 'polynomial'
__lowerCamelCase: int = 'constant'
__lowerCamelCase: Optional[Any] = 'constant_with_warmup'
__lowerCamelCase: List[str] = 'piecewise_constant'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( optimizer , step_rules , last_epoch = -1 ):
    """simple docstring"""
    rules_dict = {}
    rule_list = step_rules.split("," )
    for rule_str in rule_list[:-1]:
        value_str , value = rule_str.split(":" )
        steps = int(value_str )
        value = float(value )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.5 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-7 , _UpperCamelCase=1.0 , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ : int = lr_init - lr_end
lowercase_ : Optional[int] = num_training_steps - num_warmup_steps
lowercase_ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 1.0 , _UpperCamelCase = -1 , ):
"""simple docstring"""
lowercase_ : Any = SchedulerType(_UpperCamelCase )
lowercase_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
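# A minimal runnable sketch (added for illustration) of the warmup-then-linear-decay
# rule implemented above, written directly against torch so it does not depend on the
# obfuscated names in this file:
import torch

def _warmup_linear_demo() -> None:
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    num_warmup_steps, num_training_steps = 10, 100
    def lr_lambda(step: int) -> float:
        # Ramp the multiplier up during warmup, then decay it linearly to zero.
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))
    scheduler = LambdaLR(optimizer, lr_lambda)
    for _ in range(20):
        optimizer.step()
        scheduler.step()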
| 640
| 1
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: str = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def lowerCAmelCase__ ( self : Any , a : Tuple=0 ):
'''simple docstring'''
lowercase_ : Optional[int] = np.random.RandomState(a )
lowercase_ : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a )
lowercase_ : List[Any] = self.get_dummy_inputs()
lowercase_ : Tuple = pipe(**a ).images
lowercase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase_ : str = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowercase_ : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Dict = self.get_dummy_inputs()
lowercase_ : List[Any] = pipe(**a ).images
lowercase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase_ : List[str] = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowercase_ : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Optional[int] = self.get_dummy_inputs()
lowercase_ : str = pipe(**a ).images
lowercase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase_ : Any = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowercase_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = self.get_dummy_inputs()
lowercase_ : Optional[Any] = pipe(**a ).images
lowercase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase_ : int = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowercase_ : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Dict = self.get_dummy_inputs()
lowercase_ : str = pipe(**a ).images
lowercase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase_ : str = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowercase_ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase_ : List[Any] = self.get_dummy_inputs()
lowercase_ : str = pipe(**a ).images
lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase_ : List[Any] = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Dict = self.get_dummy_inputs()
lowercase_ : Dict = 3 * [inputs["prompt"]]
# forward
lowercase_ : Optional[Any] = pipe(**a )
lowercase_ : Optional[Any] = output.images[0, -3:, -3:, -1]
lowercase_ : List[str] = self.get_dummy_inputs()
lowercase_ : Optional[int] = 3 * [inputs.pop("prompt" )]
lowercase_ : Dict = pipe.tokenizer(
a , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=a , return_tensors="np" , )
lowercase_ : List[str] = text_inputs["input_ids"]
lowercase_ : Optional[int] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
lowercase_ : Dict = prompt_embeds
# forward
lowercase_ : Dict = pipe(**a )
lowercase_ : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Union[str, Any] = self.get_dummy_inputs()
lowercase_ : Any = 3 * ["this is a negative prompt"]
lowercase_ : Optional[Any] = negative_prompt
lowercase_ : Optional[Any] = 3 * [inputs["prompt"]]
# forward
lowercase_ : List[str] = pipe(**a )
lowercase_ : Optional[int] = output.images[0, -3:, -3:, -1]
lowercase_ : Any = self.get_dummy_inputs()
lowercase_ : Optional[int] = 3 * [inputs.pop("prompt" )]
lowercase_ : Union[str, Any] = []
for p in [prompt, negative_prompt]:
lowercase_ : Dict = pipe.tokenizer(
a , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=a , return_tensors="np" , )
lowercase_ : Dict = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
lowercase_ , lowercase_ : Any = embeds
# forward
lowercase_ : Optional[Any] = pipe(**a )
lowercase_ : int = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Optional[int] = ort.SessionOptions()
lowercase_ : Tuple = False
return options
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=a )
lowercase_ : Optional[Any] = "A painting of a squirrel eating a burger"
np.random.seed(0 )
lowercase_ : Optional[Any] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type="np" )
lowercase_ : int = output.images
lowercase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ : int = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Tuple = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
lowercase_ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = "open neural network exchange"
lowercase_ : List[str] = np.random.RandomState(0 )
lowercase_ : Tuple = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=a , output_type="np" )
lowercase_ : Any = output.images
lowercase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ : str = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Optional[int] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
lowercase_ : int = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=a )
lowercase_ : Tuple = "open neural network exchange"
lowercase_ : Tuple = np.random.RandomState(0 )
lowercase_ : Any = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=a , output_type="np" )
lowercase_ : str = output.images
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ : Optional[int] = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = 0
def test_callback_fn(a : int , a : int , a : np.ndarray ) -> None:
lowercase_ : List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
lowercase_ : List[str] = latents[0, -3:, -3:, -1]
lowercase_ : Dict = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
lowercase_ : List[Any] = latents[0, -3:, -3:, -1]
lowercase_ : Optional[int] = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
lowercase_ : Any = False
lowercase_ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Union[str, Any] = "Andromeda galaxy in a bottle"
lowercase_ : Union[str, Any] = np.random.RandomState(0 )
pipe(
prompt=a , num_inference_steps=5 , guidance_scale=7.5 , generator=a , callback=a , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : int = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(a , a )
assert pipe.safety_checker is None
lowercase_ : List[Any] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase_ : int = OnnxStableDiffusionPipeline.from_pretrained(a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase_ : Any = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( pence = 200 ):
    """simple docstring"""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    assert __SCREAMING_SNAKE_CASE(200) == 73682
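# A worked micro-example of the DP above (added for illustration): with pence=5 and
# coins {1, 2, 5}, the ways are {5}, {2,2,1}, {2,1,1,1} and {1,1,1,1,1}, i.e. 4.
assert __SCREAMING_SNAKE_CASE(5) == 4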
| 640
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 1_0_0
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
lowercase_ : Union[str, Any] = PriorTransformer(**a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
return model
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_image_encoder
lowercase_ : List[Any] = self.dummy_text_encoder
lowercase_ : Any = self.dummy_tokenizer
lowercase_ : Optional[Any] = self.dummy_image_processor
lowercase_ : List[str] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
lowercase_ : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCAmelCase__ ( self : Any , a : Dict , a : Dict=0 ):
'''simple docstring'''
if str(a ).startswith("mps" ):
lowercase_ : int = torch.manual_seed(a )
else:
lowercase_ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowercase_ : Any = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = "cpu"
lowercase_ : Any = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**a )
lowercase_ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
lowercase_ : List[Any] = output.image_embeds
lowercase_ : str = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase_ : Any = image[0, -1_0:]
lowercase_ : Dict = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowercase_ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = torch_device == "cpu"
lowercase_ : Tuple = True
lowercase_ : str = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = torch_device == "cpu"
lowercase_ : int = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , )
| 640
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( snake_case ):
def __init__( self : Tuple , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowercase_ : str = field
lowercase_ : Optional[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
lowercase_ : Any = Json(
cache_dir=a , data_files=a , features=a , field=a , **a , )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
if self.streaming:
lowercase_ : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase_ : Dict = None
lowercase_ : Optional[int] = None
lowercase_ : str = None
lowercase_ : str = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowercase_ : int = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
def __init__( self : str , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
lowercase_ : Dict = dataset
lowercase_ : Optional[int] = path_or_buf
lowercase_ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase_ : Optional[Any] = num_proc
lowercase_ : List[Any] = "utf-8"
lowercase_ : List[str] = to_json_kwargs
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : str = self.to_json_kwargs.pop("path_or_buf" , a )
lowercase_ : Any = self.to_json_kwargs.pop("orient" , "records" )
lowercase_ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
lowercase_ : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
lowercase_ : int = self.to_json_kwargs.pop("compression" , a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
lowercase_ : Dict = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
lowercase_ : Dict = self._write(
file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
return written
def lowerCAmelCase__ ( self : Optional[int] , a : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = args
lowercase_ : Optional[int] = query_table(
table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
lowercase_ : Dict = batch.to_pandas().to_json(
path_or_buf=a , orient=a , lines=a , index=a , **a )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCAmelCase__ ( self : int , a : BinaryIO , a : int , a : str , a : Union[str, Any] , **a : str , ):
'''simple docstring'''
lowercase_ : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
lowercase_ : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(a )
else:
lowercase_ , lowercase_ : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(a )
return written
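# A short usage sketch (illustrative file name): the reader and writer above back
# datasets' public Dataset.from_json / Dataset.to_json API, which streams the
# underlying Arrow table to JSON in batches exactly as _write does.
from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
ds.to_json("tiny.jsonl", lines=True, orient="records")  # one JSON object per line
assert Dataset.from_json("tiny.jsonl").num_rows == 3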
| 640
| 1
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
UpperCamelCase__ = 'hf-internal-testing/tiny-random-bert'
UpperCamelCase__ = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
UpperCamelCase__ = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : str = cached_file(a , a )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(a ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(a , a ) ) )
with open(os.path.join(a , "refs" , "main" ) ) as f:
lowercase_ : List[Any] = f.read()
self.assertEqual(a , os.path.join(a , "snapshots" , a , a ) )
self.assertTrue(os.path.isfile(a ) )
# File is cached at the same place the second time.
lowercase_ : List[str] = cached_file(a , a )
self.assertEqual(a , a )
# Using a specific revision to test the full commit hash.
lowercase_ : int = cached_file(a , a , revision="9b8c223" )
self.assertEqual(a , os.path.join(a , "snapshots" , a , a ) )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(a , "is not a valid model identifier" ):
lowercase_ : Any = cached_file("tiny-random-bert" , a )
with self.assertRaisesRegex(a , "is not a valid git identifier" ):
lowercase_ : Tuple = cached_file(a , a , revision="aaaa" )
with self.assertRaisesRegex(a , "does not appear to have a file named" ):
lowercase_ : List[Any] = cached_file(a , "conf" )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(a , "does not appear to have a file named" ):
lowercase_ : Optional[Any] = cached_file(a , "conf" )
with open(os.path.join(a , "refs" , "main" ) ) as f:
lowercase_ : Optional[int] = f.read()
self.assertTrue(os.path.isfile(os.path.join(a , ".no_exist" , a , "conf" ) ) )
lowercase_ : int = cached_file(a , "conf" , _raise_exceptions_for_missing_entries=a )
self.assertIsNone(a )
lowercase_ : List[Any] = cached_file(a , "conf" , local_files_only=a , _raise_exceptions_for_missing_entries=a )
self.assertIsNone(a )
lowercase_ : int = mock.Mock()
lowercase_ : Tuple = 5_0_0
lowercase_ : Tuple = {}
lowercase_ : List[Any] = HTTPError
lowercase_ : Optional[int] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=a ) as mock_head:
lowercase_ : Union[str, Any] = cached_file(a , "conf" , _raise_exceptions_for_connection_errors=a )
self.assertIsNone(a )
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , a ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , a ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , a ) )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(a , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , a )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(a , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , a , revision="ahaha" )
lowercase_ : Optional[int] = get_file_from_repo("bert-base-cased" , a )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase_ : Dict = json.loads(open(a , "r" ).read() )
self.assertEqual(config["hidden_size"] , 7_6_8 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = Path(a ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(a , "a.txt" ) , str(a ) )
self.assertIsNone(get_file_from_repo(a , "b.txt" ) )
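# A hedged sketch of the API exercised above: cached_file resolves a filename in a
# Hub repo to a local snapshot path, and the _raise_exceptions_for_* flags turn
# missing files into None instead of raised errors (network access required).
from transformers.utils import cached_file

config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(config_path)  # .../models--hf-internal-testing--tiny-random-bert/snapshots/<sha>/config.json
missing = cached_file(
    "hf-internal-testing/tiny-random-bert",
    "does_not_exist.txt",
    _raise_exceptions_for_missing_entries=False,
)
assert missing is None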
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase_ : List[str] = update_area_of_max_square(_UpperCamelCase , col + 1 )
lowercase_ : List[Any] = update_area_of_max_square(row + 1 , col + 1 )
lowercase_ : Tuple = update_area_of_max_square(row + 1 , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Optional[int] = 1 + min([right, diagonal, down] )
lowercase_ : Any = max(largest_square_area[0] , _UpperCamelCase )
return sub_problem_sol
else:
return 0
lowercase_ : int = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase_ : Dict = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase )
lowercase_ : str = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase )
lowercase_ : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Tuple = 1 + min([right, diagonal, down] )
lowercase_ : int = max(largest_square_area[0] , _UpperCamelCase )
lowercase_ : Dict = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase_ : Any = [0]
lowercase_ : Optional[int] = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase_ : List[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Tuple = dp_array[row][col + 1]
lowercase_ : List[str] = dp_array[row + 1][col + 1]
lowercase_ : List[Any] = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase_ : Any = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = max(dp_array[row][col] , _UpperCamelCase )
else:
lowercase_ : int = 0
return largest_square_area
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = [0] * (cols + 1)
lowercase_ : Union[str, Any] = [0] * (cols + 1)
lowercase_ : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Dict = current_row[col + 1]
lowercase_ : List[Any] = next_row[col + 1]
lowercase_ : Tuple = next_row[col]
if mat[row][col] == 1:
lowercase_ : Dict = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : int = max(current_row[col] , _UpperCamelCase )
else:
lowercase_ : Tuple = 0
lowercase_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
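# A compact, self-contained restatement of the bottom-up recurrence above (fresh
# names, since the definitions in this row are obfuscated): dp[r][c] holds the side
# of the largest all-ones square with top-left corner (r, c), built from the right,
# bottom and diagonal neighbours in O(rows * cols) time.
def largest_square_side(mat):
    rows, cols = len(mat), len(mat[0])
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for r in range(rows - 1, -1, -1):
        for c in range(cols - 1, -1, -1):
            if mat[r][c] == 1:
                dp[r][c] = 1 + min(dp[r][c + 1], dp[r + 1][c + 1], dp[r + 1][c])
                best = max(best, dp[r][c])
    return best

assert largest_square_side([[1, 1], [1, 1]]) == 2
assert largest_square_side([[1, 0], [1, 1]]) == 1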
| 640
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Union[PIL.Image.Image, np.ndarray]
class _UpperCAmelCase ( snake_case ):
def __init__( self : Union[str, Any] , a : PriorTransformer , a : CLIPVisionModel , a : CLIPImageProcessor , a : HeunDiscreteScheduler , a : ShapERenderer , ):
'''simple docstring'''
super().__init__()
self.register_modules(
prior=a , image_encoder=a , image_processor=a , scheduler=a , renderer=a , )
def lowerCAmelCase__ ( self : List[str] , a : List[Any] , a : List[str] , a : str , a : str , a : Tuple , a : Tuple ):
'''simple docstring'''
if latents is None:
lowercase_ : Any = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : Optional[int] = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Dict , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[int] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Union[str, Any] = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Any , a : Optional[int] , ):
'''simple docstring'''
if isinstance(a , a ) and isinstance(image[0] , torch.Tensor ):
lowercase_ : List[str] = torch.cat(a , axis=0 ) if image[0].ndim == 4 else torch.stack(a , axis=0 )
if not isinstance(a , torch.Tensor ):
lowercase_ : Any = self.image_processor(a , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowercase_ : Optional[Any] = image.to(dtype=self.image_encoder.dtype , device=a )
lowercase_ : int = self.image_encoder(a )["last_hidden_state"]
lowercase_ : Tuple = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowercase_ : Dict = image_embeds.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Union[str, Any] = torch.zeros_like(a )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : List[str] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[PIL.Image.Image, List[PIL.Image.Image]] , a : int = 1 , a : int = 2_5 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : float = 4.0 , a : int = 6_4 , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , PIL.Image.Image ):
lowercase_ : Optional[Any] = 1
elif isinstance(a , torch.Tensor ):
lowercase_ : str = image.shape[0]
elif isinstance(a , a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase_ : Tuple = len(a )
else:
raise ValueError(
f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(a )}""" )
lowercase_ : Union[str, Any] = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ : Optional[Any] = self._encode_image(a , a , a , a )
# prior
self.scheduler.set_timesteps(a , device=a )
lowercase_ : Any = self.scheduler.timesteps
lowercase_ : List[str] = self.prior.config.num_embeddings
lowercase_ : Optional[int] = self.prior.config.embedding_dim
lowercase_ : str = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , a , a , a , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase_ : Dict = latents.reshape(latents.shape[0] , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Any = self.scheduler.scale_model_input(a , a )
lowercase_ : List[Any] = self.prior(
a , timestep=a , proj_embedding=a , ).predicted_image_embedding
# remove the variance
lowercase_ , lowercase_ : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase_ : List[Any] = self.scheduler.step(
a , timestep=a , sample=a , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=a )
lowercase_ : Any = []
for i, latent in enumerate(a ):
lowercase_ : Optional[int] = self.renderer.decode(
latent[None, :] , a , size=a , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(a )
lowercase_ : int = torch.stack(a )
if output_type not in ["np", "pil"]:
raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
lowercase_ : str = images.cpu().numpy()
if output_type == "pil":
lowercase_ : Optional[int] = [self.numpy_to_pil(a ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=a )
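# A tiny numeric sketch of the classifier-free-guidance update in the denoising
# loop above: the batched prediction splits into an unconditional and a
# conditional half, and the final estimate extrapolates away from the
# unconditional one by guidance_scale.
import torch

noise_pred = torch.tensor([[0.1], [0.5]])  # [uncond, cond] stacked on dim 0
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + 4.0 * (noise_pred_text - noise_pred_uncond)
print(guided)  # tensor([[1.7000]]) = 0.1 + 4.0 * (0.5 - 0.1)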
| 640
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
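# Worked example for the 1% relative-tolerance helper above (invoked later in this
# file as is_apercent_close): abs(98 - 100) / 100 = 0.02 fails the < 0.01 check,
# while abs(99.5 - 100) / 100 = 0.005 passes.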
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
lowercase_ : int = TestCommand(*_UpperCamelCase )
test_command.run()
lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
assert os.path.exists(_UpperCamelCase )
lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
lowercase_ : Optional[int] = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
if key == "num_bytes":
assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
elif key == "splits":
assert list(_UpperCamelCase ) == list(_UpperCamelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 640
| 1
|
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True ):
"""simple docstring"""
model.train()
lowercase_ : Optional[Any] = model(_UpperCamelCase )
lowercase_ : List[Any] = F.mse_loss(_UpperCamelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
set_seed(42 )
lowercase_ : List[str] = RegressionModel()
lowercase_ : Tuple = deepcopy(_UpperCamelCase )
lowercase_ : Tuple = RegressionDataset(length=80 )
lowercase_ : List[Any] = DataLoader(_UpperCamelCase , batch_size=16 )
model.to(accelerator.device )
if sched:
lowercase_ : List[str] = AdamW(params=model.parameters() , lr=1e-3 )
lowercase_ : Optional[Any] = AdamW(params=ddp_model.parameters() , lr=1e-3 )
lowercase_ : Tuple = LambdaLR(_UpperCamelCase , lr_lambda=lambda _UpperCamelCase : epoch**0.65 )
lowercase_ : str = LambdaLR(_UpperCamelCase , lr_lambda=lambda _UpperCamelCase : epoch**0.65 )
# Make a copy of `model`
if sched:
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = accelerator.prepare(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
lowercase_ , lowercase_ : Any = accelerator.prepare(_UpperCamelCase , _UpperCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ : List[Any] = get_training_setup(_UpperCamelCase )
# Use a single batch
lowercase_ , lowercase_ : Optional[int] = next(iter(_UpperCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ : List[str] = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ : Any = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_UpperCamelCase ):
step_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
# Sync grads
step_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowercase_ : Dict = ddp_input[torch.randperm(len(_UpperCamelCase ) )]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ : int = get_training_setup(_UpperCamelCase )
# Use a single batch
lowercase_ , lowercase_ : List[Any] = next(iter(_UpperCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ : Any = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ : Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_UpperCamelCase ):
step_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
# Sync grads
step_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowercase_ : str = ddp_input[torch.randperm(len(_UpperCamelCase ) )]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=False , _UpperCamelCase=False ):
"""simple docstring"""
lowercase_ : Any = Accelerator(
split_batches=_UpperCamelCase , dispatch_batches=_UpperCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase_ , lowercase_ , lowercase_ : Any = get_training_setup(_UpperCamelCase )
for iteration, batch in enumerate(_UpperCamelCase ):
lowercase_ , lowercase_ : Optional[int] = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ : Optional[int] = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_UpperCamelCase ):
step_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_UpperCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowercase_ : int = ddp_input[torch.randperm(len(_UpperCamelCase ) )]
GradientState._reset_state()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=False , _UpperCamelCase=False ):
"""simple docstring"""
lowercase_ : int = Accelerator(
split_batches=_UpperCamelCase , dispatch_batches=_UpperCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = get_training_setup(_UpperCamelCase , _UpperCamelCase )
for iteration, batch in enumerate(_UpperCamelCase ):
lowercase_ , lowercase_ : List[Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ : Tuple = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_UpperCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_UpperCamelCase ):
step_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
lowercase_ : int = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_UpperCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Optional[int] = Accelerator()
lowercase_ : str = RegressionDataset(length=80 )
lowercase_ : List[Any] = DataLoader(_UpperCamelCase , batch_size=16 )
lowercase_ : List[str] = RegressionDataset(length=96 )
lowercase_ : int = DataLoader(_UpperCamelCase , batch_size=16 )
lowercase_ , lowercase_ : Union[str, Any] = accelerator.prepare(_UpperCamelCase , _UpperCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_UpperCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_UpperCamelCase )
if iteration < len(_UpperCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_UpperCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_UpperCamelCase )
if batch_num < len(_UpperCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Union[str, Any] = Accelerator()
lowercase_ : Tuple = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(_UpperCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(_UpperCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(_UpperCamelCase , _UpperCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(_UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
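# A framework-free sketch (plain PyTorch, illustrative names) of the accumulation
# contract these tests verify: the loss is scaled by the number of accumulation
# steps, gradients pile up in .grad across micro-batches, and the optimizer only
# steps at window boundaries.
import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
accum_steps = 2
batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(4)]

for step, (x, y) in enumerate(batches):
    loss = torch.nn.functional.mse_loss(model(x), y) / accum_steps
    loss.backward()                    # gradients accumulate in param.grad
    if (step + 1) % accum_steps == 0:  # step/zero only once per window
        opt.step()
        opt.zero_grad()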
| 640
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = []
for output in outputs:
if isinstance(_UpperCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : List[str] = create_inputs(self.tool.inputs )
lowercase_ : List[str] = self.tool(*a )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Union[str, Any] = [outputs]
self.assertListEqual(output_types(a ) , self.tool.outputs )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = create_inputs(self.tool.inputs )
lowercase_ : str = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
for output, output_type in zip(a , self.tool.outputs ):
lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a ) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : Optional[int] = []
for _input, input_type in zip(a , self.tool.inputs ):
if isinstance(a , a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Any = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : Any = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _UpperCAmelCase ( snake_case ):
@staticmethod
@abstractmethod
def lowerCAmelCase__ ( a : ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
raise NotImplementedError()
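# A hypothetical concrete command (illustrative names, not part of the library)
# showing how the two abstract hooks above split the work: the static method wires
# a sub-parser and binds it to the class, while run() executes later.
from argparse import ArgumentParser

class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("hello")
        sub.add_argument("--name", default="world")
        sub.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"hello, {self.name}")

parser = ArgumentParser("demo")
HelloCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["hello", "--name", "docs"])
args.func(args).run()  # -> hello, docs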
| 640
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase_ : str = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
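# Worked example for the rounding above: with h = w = 768 and scale_factor = 8,
# 768 // 8**2 = 12 with no remainder, so the latent grid is 12 * 8 = 96 per side;
# h = 770 would round up to (12 + 1) * 8 = 104, so no input pixels are dropped.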
class _UpperCAmelCase ( snake_case ):
def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
'''simple docstring'''
if latents is None:
lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : str = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
'''simple docstring'''
lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
lowercase_ : Any = self.tokenizer(
a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : Union[str, Any] = text_inputs.input_ids
lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase_ : List[str] = text_input_ids.to(a )
lowercase_ : int = text_inputs.attention_mask.to(a )
lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
input_ids=a , attention_mask=a )
lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str]
if negative_prompt is None:
lowercase_ : int = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase_ : Tuple = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowercase_ : Dict = negative_prompt
lowercase_ : str = self.tokenizer(
a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : List[Any] = uncond_input.input_ids.to(a )
lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
lowercase_ , lowercase_ : int = self.text_encoder(
input_ids=a , attention_mask=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[str] = negative_prompt_embeds.shape[1]
lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a , -1 )
lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : str = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
if self.safety_checker is not None:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , a ):
lowercase_ : List[str] = 1
elif isinstance(a , a ):
lowercase_ : int = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
lowercase_ : Tuple = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
a , a , a , a , a )
if isinstance(a , a ):
lowercase_ : Optional[int] = torch.cat(a , dim=0 )
if isinstance(a , a ):
lowercase_ : int = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a )
self.scheduler.set_timesteps(a , device=a )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : str = self.unet.config.in_channels
lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
# create initial latent
lowercase_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
a , a , a , generator=a , ).prev_sample
# post-processing
lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Optional[int] = image.clamp(0 , 1 )
lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 1
|
'''simple docstring'''
from ....utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
def __init__( self : Optional[Any] , a : Optional[int] , a : Any=None , a : Any=2_0_4_8 ):
'''simple docstring'''
lowercase_ : int = config.__dict__
lowercase_ : int = modal_hidden_size
if num_labels:
lowercase_ : List[Any] = num_labels
| 640
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=512 , _UpperCamelCase=512 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : str = np.array(pil_image.convert("RGB" ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
lowercase_ : int = np.transpose(_UpperCamelCase , [2, 0, 1] )
lowercase_ : str = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
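# Numeric check of the normalization above: pixel / 127.5 - 1 maps uint8 values
# 0, 127.5 and 255 to -1.0, 0.0 and 1.0, i.e. onto the [-1, 1] range the MoVQ
# encoder expects.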
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
lowercase_ : str = image.to(device=a , dtype=a )
lowercase_ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(a , a ):
lowercase_ : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase_ : List[Any] = torch.cat(a , dim=0 )
else:
lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
# get latents
lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
lowercase_ : Tuple = init_latents
return latents
def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : Dict = guidance_scale > 1.0
if isinstance(a , a ):
lowercase_ : Dict = torch.cat(a , dim=0 )
lowercase_ : Dict = image_embeds.shape[0]
if isinstance(a , a ):
lowercase_ : str = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [image]
if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
self.scheduler.set_timesteps(a , device=a )
lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
lowercase_ : Tuple = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : int = {"image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : Tuple = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
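
# A minimal usage sketch for the pipeline above, assuming the public
# "kandinsky-community/kandinsky-2-2-prior" and
# "kandinsky-community/kandinsky-2-2-decoder" checkpoints (the model ids and
# the image URL are assumptions, not taken from this file):
if __name__ == "__main__":
    import torch
    from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
    from diffusers.utils import load_image

    prior = KandinskyV22PriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    ).to("cuda")

    image_embeds, negative_image_embeds = prior("a fantasy landscape, 4k photo").to_tuple()
    init_image = load_image("https://example.com/cat.png")  # placeholder URL

    out = pipe(
        image_embeds=image_embeds,
        image=init_image,
        negative_image_embeds=negative_image_embeds,
        strength=0.3,
    )
    out.images[0].save("fantasy_landscape.png")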
"""
The maximum subarray problem: find the contiguous subarray with the largest
sum within a given array of numbers. This module solves it with a
divide-and-conquer algorithm in O(n log n) time and includes a simple
benchmark that plots runtime against input size.
"""
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """
    Solves the maximum subarray problem using divide and conquer.
    Returns the start index, end index and sum of the maximum subarray.
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray crossing the midpoint: scan left from mid, then right from mid + 1."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
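
    # Quick example (not in the original module): the classic test array below
    # has maximum subarray [4, -1, 2, 1], spanning indices 3..6 with sum 6.
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(max_subarray(nums, 0, len(nums) - 1))  # -> (3, 6, 6)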
"""
Processor class for Pix2Struct.
"""
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2StructImageProcessor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            # The text is the decoder target for Pix2Struct, so rename the keys accordingly.
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
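
# A minimal usage sketch, assuming the public "google/pix2struct-textcaps-base"
# checkpoint (the image path is a placeholder):
if __name__ == "__main__":
    from PIL import Image
    from transformers import Pix2StructProcessor

    processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
    image = Image.open("document.png")  # placeholder path
    inputs = processor(images=image, return_tensors="pt", max_patches=2048)
    # Image inputs come back as flattened patches plus an attention mask.
    print(inputs["flattened_patches"].shape, inputs["attention_mask"].shape)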
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the total sequence length reaches `max_length`."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops once `max_new_tokens` tokens have been generated after `start_length`."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once `max_time` seconds have elapsed since `initial_timestamp`."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
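
# A minimal usage sketch for the criteria above with a standard causal LM
# ("gpt2" is used purely for illustration; any transformers causal LM works):
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer, MaxTimeCriteria, StoppingCriteriaList

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    inputs = tokenizer("The quick brown fox", return_tensors="pt")
    criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])  # stop after ~2 s of generation
    output_ids = model.generate(**inputs, stopping_criteria=criteria, max_new_tokens=200)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))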
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
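
# To run just this test case directly (pytest is the usual route in the
# diffusers repo; this is a plain-unittest alternative):
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(KandinskyV22PriorPipelineFastTests)
    unittest.TextTestRunner(verbosity=2).run(suite)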
"""Convert a BigScience BLOOM (Megatron-DeepSpeed) checkpoint to the transformers format."""
import argparse
import json
import os
import re

import torch

from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging


logging.set_verbosity_info()

WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def layer_name_mapping(key, file):
    """Map Megatron-DeepSpeed weight names to transformers names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    """Size in bytes of one element of `dtype`."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for file in file_names:  # no index variable here, so the TP loop below cannot shadow it
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
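
# A direct Python invocation of the converter, for reference (all paths are
# placeholders; assumes a local Megatron-DeepSpeed BLOOM checkpoint laid out
# as layer_*-model_0*.pt files):
#
#     convert_bloom_checkpoint_to_pytorch(
#         bloom_checkpoint_path="/path/to/bloom_megatron_checkpoint",
#         bloom_config_file="",            # empty string -> default BloomConfig()
#         pytorch_dump_folder_path="./bloom-hf",
#         shard_model=True,                # write sharded .bin files plus an index
#         pretraining_tp=4,                # TP degree used during pretraining
#     )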
"""
Find the Next Greatest Element (NGE) for each element of an array: the first
element to its right that is strictly greater, or -1 if there is none.
"""
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Get the NGE for each element by checking all subsequent elements: O(n^2).

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Like next_greatest_element_slow(), but iterating over slices: still O(n^2),
    with less index bookkeeping.

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    Get the NGE for each element using a stack, in O(n) time: traverse from the
    right, popping stack entries that are not greater than the current element.

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
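
    # Explicit self-check (the doctests above already assert this): the O(n)
    # stack version must reproduce the expected array defined at the top.
    assert next_greatest_element(arr) == expect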
"""Convert an original FLAVA checkpoint (plus its DALL-E codebook) to the transformers format."""
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the model's weights into the transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    # Sanity check: total parameter mass must be preserved by the rename.
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
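
# A direct Python invocation of the converter, for reference (placeholder
# paths; per the os.path.exists() fallback above, the checkpoint may also be
# given as a URL):
#
#     convert_flava_checkpoint(
#         checkpoint_path="/path/to/flava_checkpoint.pt",
#         codebook_path="/path/to/flava_codebook.pt",
#         pytorch_dump_folder_path="./flava-hf",
#         config_path=None,  # None -> default FlavaConfig()
#     )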
"""GPT-NeoX-Japanese model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32,
        intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000,
        max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True,
        bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
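
# A minimal usage sketch: instantiate the default configuration defined above
# and inspect a few fields.
if __name__ == "__main__":
    config = GPTNeoXJapaneseConfig()
    print(config.model_type)         # gpt_neox_japanese
    print(config.hidden_size)        # 2560
    print(config.num_hidden_layers)  # 32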