| code (string, 82 to 53.2k chars) | code_codestyle (int64, 0 to 721) | style_context (string, 91 to 41.9k chars) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax

if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up memory between tests.
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
---
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)

if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        # Converting to BetterTransformer and back must not change generation output.
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # Saving while still converted must raise; reverse_bettertransformer() is required first.
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
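The same round trip the tests above exercise, as a minimal standalone sketch (the save path is illustrative; `optimum` is assumed installed):

```python
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()        # swap in BetterTransformer modules
# ... run inference ...
model = model.reverse_bettertransformer()   # restore canonical modules before saving
model.save_pretrained("./t5-checkpoint")    # illustrative path
```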
---
class Node:
    # A binary search tree (BST) node.
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from the input values.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse the BST in order to obtain the sorted sequence.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
---
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt",
        "facebook/dpr-ctx_encoder-multiset-base": "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json",
        "facebook/dpr-ctx_encoder-multiset-base": "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json",
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt",
        "facebook/dpr-question_encoder-multiset-base": "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json",
        "facebook/dpr-question_encoder-multiset-base": "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json",
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt",
        "facebook/dpr-reader-multiset-base": "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json",
        "facebook/dpr-reader-multiset-base": "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json",
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        # Score every candidate span up to max_answer_length, then greedily keep
        # the highest-scoring non-overlapping spans.
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
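A minimal usage sketch of the reader tokenizer's `__call__` contract documented above; the checkpoint is one of the keys from the maps, and the question/passage strings are made up:

```python
from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
# encoded_inputs["input_ids"] has shape (n_passages, sequence_length)
```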
---
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the mean absolute deviation of a list of numbers.

    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
---
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    # Compute the wall-clock duration (in minutes) of one GitHub Actions job.
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info


def get_job_time(workflow_run_id, token=None):
    # Fetch all jobs of a workflow run (paginated, 100 per page) and extract their durations.
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
---
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
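A sketch of how these filesystems are typically reached through fsspec URL chaining, assuming `datasets` has registered the classes above under their protocols (the local path is illustrative; the chained URL form mirrors the protocol comment in the base class):

```python
import fsspec

# "gzip://<inner name>::<target url>" layers the gzip filesystem over the target file.
with fsspec.open("gzip://data.txt::file://./data.txt.gz", mode="rt") as f:
    print(f.read())
```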
---
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
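The round trip these tests exercise, as a standalone sketch (the file name and column values are illustrative):

```python
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]})
ParquetDatasetWriter(ds, "example.parquet").write()        # returns the number of bytes written
reloaded = ParquetDatasetReader("example.parquet").read()  # yields a Dataset again
assert reloaded.column_names == ["col_1", "col_2", "col_3"]
```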
---
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """
    sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`): hidden states output of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4,
        )

    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True):
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
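A hedged sketch of one forward pass through the model above; the shapes follow the config defaults, and the names assume the public diffusers API:

```python
import torch

model = UNet1DModel()               # defaults: sample_size=65536, in/out channels = 2
sample = torch.randn(1, 2, 65536)   # (batch, channels, length)
timestep = torch.tensor(10)
out = model(sample, timestep).sample
print(out.shape)                    # expected torch.Size([1, 2, 65536]) under the defaults
```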
---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    'configuration_table_transformer': [
        'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TableTransformerConfig',
        'TableTransformerOnnxConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_table_transformer'] = [
        'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TableTransformerForObjectDetection',
        'TableTransformerModel',
        'TableTransformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
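A sketch of what this `_LazyModule` layout buys: importing the package is cheap, and torch-backed symbols only materialize on first attribute access (module path assumed to match the transformers layout):

```python
# Cheap: only the import structure is registered; modeling code is not imported yet.
import transformers.models.table_transformer as table_transformer

# First attribute access triggers the real submodule import via _LazyModule.
config_cls = table_transformer.TableTransformerConfig
```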
---
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """
    Return the prime factors of n in ascending order (trial division).

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
---
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    # Decorator that returns the wall-clock duration of `func` instead of its result.
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    # Produce (key, example) pairs of random data matching the given feature spec.
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    # Write the generated examples to an Arrow file and load them back as a Dataset.
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
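A hedged usage sketch of the generator above; the feature spec, output path, and shapes are made up for illustration:

```python
import datasets

features = datasets.Features(
    {"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))}
)
dataset = generate_example_dataset(
    "/tmp/bench.arrow", features, num_examples=100, seq_shapes={"vec": (16,)}
)
print(dataset)
```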
---
from math import sqrt


def sum_of_divisors(n: int) -> int:
    # Sum of the proper divisors of n (all divisors except n itself).
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    # Sum all amicable numbers below n: i is amicable if d(d(i)) == i and d(i) != i.
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
---
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder

CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE)


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(text_file), path_in_repo="data/text_data.txt", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_csv_with_dir_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_image_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
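A sketch of how a test might consume these fixtures (the data fixtures such as `text_file` are defined elsewhere in the suite; the `token` argument name for `load_dataset` varies across datasets versions):

```python
def test_load_private_text_repo(hf_private_dataset_repo_txt_data, hf_token):
    from datasets import load_dataset

    ds = load_dataset(hf_private_dataset_repo_txt_data, token=hf_token)
    assert "train" in ds
```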
---
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
---
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = ["pixel_values"]
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = size if size is not None else {'height': 3_84, 'width': 3_84}
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def a__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
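# Hedged, self-contained sketch of the rescale -> normalize math the class applies
# per image (the constants here are assumed to match the OPENAI_CLIP_MEAN/STD
# imported above):
import numpy as np

_clip_mean = np.array([0.48145466, 0.4578275, 0.40821073])
_clip_std = np.array([0.26862954, 0.26130258, 0.27577711])
_pix = np.zeros((384, 384, 3), dtype=np.uint8)
_normed = (_pix.astype(np.float32) * (1 / 255) - _clip_mean) / _clip_std
assert _normed.shape == (384, 384, 3)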
| 688
| 0
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float):
    def brightness(c: int) -> float:
        # 128 + level + (c - 128) reduces to c + level: every channel is shifted by `level`
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""")
    return img.point(brightness)
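# Quick check (a sketch): with level=100 the point table maps 0 -> 100 and 155 -> 255;
# PIL clamps values above 255 when the 8-bit image is written out.
_table = [128 + 100 + (c - 128) for c in range(256)]
assert _table[0] == 100 and _table[155] == 255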
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 1_00)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 50
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    # Linear scan over array[left..right] (right inclusive); returns -1 if absent.
    for i in range(left, right + 1):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    # Iterative ternary search over a sorted array; returns -1 if absent.
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        # split the window at two interior points; the offsets keep both
        # indices inside [left, right] even when left is nonzero
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    # Recursive ternary search over a sorted array; returns -1 if absent.
    if left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1
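# Minimal sanity check (a sketch): both variants agree on a small sorted list.
_demo = [1, 3, 5, 7, 9, 11]
assert ite_ternary_search(_demo, 7) == 3
assert rec_ternary_search(0, len(_demo) - 1, _demo, 7) == 3
assert ite_ternary_search(_demo, 4) == -1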
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f'''Iterative search: {target} found at position: {resulta}''')
        print(f'''Recursive search: {target} found at position: {resultb}''')
    else:
        print("""Not found""")
| 451
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class a__( lowerCamelCase__ ):
lowercase__ = 42
lowercase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class a__( lowerCamelCase__ ):
lowercase__ = 42
lowercase__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 709
|
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def lowercase_ ( self : Dict , __snake_case : Union[str, Any]=0 ):
a : List[str] = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__snake_case ) )
a : Optional[Any] = np.random.RandomState(__snake_case )
a : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self : Union[str, Any] ):
a : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__snake_case )
a : Any = self.get_dummy_inputs()
a : int = pipe(**__snake_case ).images
a : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
a : Dict = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self : str ):
a : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a : Any = self.get_dummy_inputs()
a : str = pipe(**__snake_case ).images
a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a : int = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self : List[str] ):
a : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
# warmup pass to apply optimizations
a : Any = pipe(**self.get_dummy_inputs() )
a : Optional[int] = self.get_dummy_inputs()
a : Optional[int] = pipe(**__snake_case ).images
a : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a : Any = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self : List[Any] ):
a : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
a : List[str] = self.get_dummy_inputs()
a : Optional[int] = pipe(**__snake_case ).images
a : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a : Dict = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self : Tuple ):
a : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
a : Dict = self.get_dummy_inputs()
a : Union[str, Any] = pipe(**__snake_case ).images
a : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a : int = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self : Optional[Any] ):
a : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
a : Tuple = self.get_dummy_inputs()
a : Optional[Any] = pipe(**__snake_case ).images
a : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a : Optional[int] = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class a__( unittest.TestCase ):
@property
def lowercase_ ( self : List[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self : Dict ):
a : Tuple = ort.SessionOptions()
a : Optional[Any] = False
return options
def lowercase_ ( self : List[str] ):
a : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
a : Optional[int] = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
a : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
a : Union[str, Any] = 'A fantasy landscape, trending on artstation'
a : Dict = np.random.RandomState(0 )
a : Optional[int] = pipe(
prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__snake_case , output_type='np' , )
a : str = output.images
a : Optional[int] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
a : List[str] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self : str ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
a : List[str] = init_image.resize((7_68, 5_12) )
a : Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
a : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
a : Optional[int] = 'A fantasy landscape, trending on artstation'
a : str = np.random.RandomState(0 )
a : List[str] = pipe(
prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__snake_case , output_type='np' , )
a : str = output.images
a : Tuple = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
a : Union[str, Any] = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 195
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Optional[Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
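# Self-contained sketch of the lazy-import idea (PEP 562 module-level __getattr__;
# an illustration, not the actual _LazyModule implementation):
#
#     import importlib
#
#     _LAZY = {"AutoformerConfig": ".configuration_autoformer"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             return getattr(importlib.import_module(_LAZY[name], __name__), name)
#         raise AttributeError(name)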
| 31
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""")
    # positional
    parser.add_argument(
        """training_script""",
        type=str,
        help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ),
    )
    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod = importlib.import_module(script_fpath.stem)
    # Patch sys.argv so the training script sees the TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
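# Invocation sketch (script name and paths are illustrative):
#     python xla_spawn.py --num_cores 8 path/to/train_script.py --my_training_arg 1
# The launcher imports the training script as a module, patches sys.argv so the
# script sees --tpu_num_cores, and hands its _mp_fn to xmp.spawn.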
| 17
| 0
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCAmelCase = ["""image_processor""", """tokenizer"""]
__UpperCAmelCase = """ViltImageProcessor"""
__UpperCAmelCase = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__(self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase__ , )
_UpperCamelCase : Any = kwargs.pop("feature_extractor" )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase : List[str] = self.image_processor
def __call__(self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
# add pixel_values + pixel_mask
_UpperCamelCase : int = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
encoding.update(lowerCAmelCase__ )
return encoding
def lowercase_ (self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def lowercase_ (self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.tokenizer.model_input_names
_UpperCamelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase_ (self ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCAmelCase__ , )
return self.image_processor_class
@property
def lowercase_ (self ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCAmelCase__ , )
return self.image_processor
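# Usage sketch (hedged; the checkpoint id also appears in the VQA tool elsewhere in
# this collection):
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     enc = processor(images=pil_image, text="How many cats are there?", return_tensors="pt")
#     # enc holds the tokenizer outputs plus pixel_values / pixel_mask from the image processor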
| 239
|
"""simple docstring"""
from math import ceil
def solution(n: int = 1001) -> int:
    # Sum of the numbers on the diagonals of an n x n number spiral.
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
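# Worked check (a sketch): the diagonals of a 5 x 5 number spiral are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
assert solution(5) == 101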
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 239
| 1
|
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    # transpose row-wise records into per-attribute columns
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    # min-max normalise each column; weight 0 = lower is better, weight 1 = higher is better
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f'''Invalid weight of {weight:f} provided'''
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    # sum the per-attribute scores for each row
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    # NOTE: this wrapper's name is an assumption; the three helpers are called by
    # the names already used in this file.
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
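# Hedged example: column 0 is "lower is better" (weight 0), column 1 is
# "higher is better" (weight 1); each row gains an appended composite score.
_vehicles = [[20.0, 60.0], [23.0, 90.0], [22.0, 50.0]]
_scored = procentual_proximity(_vehicles, [0, 1])
assert _scored[0][-1] == 1.25  # best column-0 value (1.0) plus a 0.25 column-1 score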
| 50
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    # Solve P = V * I for whichever of the three quantities is passed as 0.
    result = namedtuple("""result""", """name value""")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("""Only one argument must be 0""")
    elif power < 0:
        raise ValueError(
            """Power cannot be negative in any electrical/electronics system""")
    elif voltage == 0:
        return result("""voltage""", power / current)
    elif current == 0:
        return result("""current""", power / voltage)
    elif power == 0:
        return result("""power""", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("""Exactly one argument must be 0""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 278
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _A ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : Dict ="dandelin/vilt-b32-finetuned-vqa"
SCREAMING_SNAKE_CASE_ : Tuple =(
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
SCREAMING_SNAKE_CASE_ : Any ="image_qa"
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor
SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoModelForVisualQuestionAnswering
SCREAMING_SNAKE_CASE_ : List[str] =["image", "text"]
SCREAMING_SNAKE_CASE_ : Optional[int] =["text"]
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
return self.pre_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
def _a (self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
with torch.no_grad():
return self.model(**SCREAMING_SNAKE_CASE_ ).logits
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
UpperCamelCase__ = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
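# Usage sketch (hedged): PipelineTool chains encode -> forward -> decode, so the
# tool is used roughly as
#     tool = _A()   # loads dandelin/vilt-b32-finetuned-vqa on first use
#     answer = tool(image=pil_image, question="What is on the table?")
# The exact call signature comes from the input names declared above.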
| 469
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ =datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _A ( datasets.BuilderConfig ):
SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] =None
def __UpperCamelCase ( A , A , ):
import pyspark
def generate_fn():
UpperCamelCase__ = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
UpperCamelCase__ = df_with_partition_id.select('''*''' ).where(f"part_id = {partition_id}" ).drop('''part_id''' )
UpperCamelCase__ = partition_df.collect()
UpperCamelCase__ = 0
for row in rows:
yield f"{partition_id}_{row_id}", row.asDict()
row_id += 1
return generate_fn
class _A ( _BaseExamplesIterable ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , ) -> Any:
'''simple docstring'''
UpperCamelCase__ = df
UpperCamelCase__ = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCamelCase__ = _generate_iterable_examples(self.df , self.partition_order )
def __iter__(self ) -> int:
'''simple docstring'''
yield from self.generate_examples_fn()
def _a (self , SCREAMING_SNAKE_CASE_ ) -> "SparkExamplesIterable":
'''simple docstring'''
UpperCamelCase__ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(SCREAMING_SNAKE_CASE_ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> "SparkExamplesIterable":
'''simple docstring'''
UpperCamelCase__ = self.split_shard_indices_by_worker(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE_ )
@property
def _a (self ) -> int:
'''simple docstring'''
return len(self.partition_order )
class _A ( datasets.DatasetBuilder ):
SCREAMING_SNAKE_CASE_ : List[str] =SparkConfig
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> Union[str, Any]:
'''simple docstring'''
import pyspark
UpperCamelCase__ = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCamelCase__ = df
UpperCamelCase__ = working_dir
super().__init__(
cache_dir=SCREAMING_SNAKE_CASE_ , config_name=str(self.df.semanticHash() ) , **SCREAMING_SNAKE_CASE_ , )
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
def create_cache_and_write_probe(SCREAMING_SNAKE_CASE_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(SCREAMING_SNAKE_CASE_ , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCamelCase__ = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def _a (self ) -> Any:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _a (self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _a (self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
import pyspark
def get_arrow_batch_size(SCREAMING_SNAKE_CASE_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
UpperCamelCase__ = self.df.count()
UpperCamelCase__ = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCamelCase__ = (
self.df.limit(SCREAMING_SNAKE_CASE_ )
.repartition(1 )
.mapInArrow(SCREAMING_SNAKE_CASE_ , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCamelCase__ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCamelCase__ = min(SCREAMING_SNAKE_CASE_ , int(approx_total_size / max_shard_size ) )
UpperCamelCase__ = self.df.repartition(SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
'''simple docstring'''
import pyspark
UpperCamelCase__ = ParquetWriter if file_format == '''parquet''' else ArrowWriter
UpperCamelCase__ = os.path.join(self._working_dir , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) if self._working_dir else fpath
UpperCamelCase__ = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCamelCase__ = self.config.features
UpperCamelCase__ = self._writer_batch_size
UpperCamelCase__ = self._fs.storage_options
def write_arrow(SCREAMING_SNAKE_CASE_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCamelCase__ = pyspark.TaskContext().taskAttemptId()
UpperCamelCase__ = next(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
UpperCamelCase__ = 0
UpperCamelCase__ = writer_class(
features=SCREAMING_SNAKE_CASE_ , path=working_fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , writer_batch_size=SCREAMING_SNAKE_CASE_ , storage_options=SCREAMING_SNAKE_CASE_ , embed_local_files=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = pa.Table.from_batches([first_batch] )
writer.write_table(SCREAMING_SNAKE_CASE_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCamelCase__ , UpperCamelCase__ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
UpperCamelCase__ = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , writer_batch_size=SCREAMING_SNAKE_CASE_ , storage_options=SCREAMING_SNAKE_CASE_ , embed_local_files=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = pa.Table.from_batches([batch] )
writer.write_table(SCREAMING_SNAKE_CASE_ )
if writer._num_bytes > 0:
UpperCamelCase__ , UpperCamelCase__ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase__ = os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE_ ) , os.path.basename(SCREAMING_SNAKE_CASE_ ) )
shutil.move(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = (
self.df.mapInArrow(SCREAMING_SNAKE_CASE_ , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "arrow" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
'''simple docstring'''
self._validate_cache_dir()
UpperCamelCase__ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = not is_remote_filesystem(self._fs )
UpperCamelCase__ = os.path.join if is_local else posixpath.join
UpperCamelCase__ = '''-TTTTT-SSSSS-of-NNNNN'''
UpperCamelCase__ = F"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
UpperCamelCase__ = path_join(self._output_dir , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = []
UpperCamelCase__ = []
for task_id, content in self._prepare_split_single(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            # unpack the per-task stats yielded by _prepare_split_single
            (num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = total_num_examples
UpperCamelCase__ = total_num_bytes
# should rename everything at the end
logger.debug(F"Renaming {total_shards} shards." )
if total_shards > 1:
UpperCamelCase__ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCamelCase__ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
rename(
SCREAMING_SNAKE_CASE_ , fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , fpath.replace('''TTTTT-SSSSS''' , F"{global_shard_id:05d}" ).replace('''NNNNN''' , F"{total_shards:05d}" ) , )
UpperCamelCase__ = []
UpperCamelCase__ = 0
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase__ , UpperCamelCase__ = task_id_and_num_shards[i]
for shard_id in range(SCREAMING_SNAKE_CASE_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ).map(lambda SCREAMING_SNAKE_CASE_ : _rename_shard(*SCREAMING_SNAKE_CASE_ ) ).collect()
else:
# don't use any pattern
UpperCamelCase__ = 0
UpperCamelCase__ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , fpath.replace(SCREAMING_SNAKE_CASE_ , '''''' ) , )
def _a (self , SCREAMING_SNAKE_CASE_ , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
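# Downstream usage sketch (hedged; assumes a live SparkSession — this builder backs
# datasets.Dataset.from_spark):
#     df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#     ds = datasets.Dataset.from_spark(df)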
| 469
| 1
|
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_UpperCamelCase = logging.get_logger(__name__)
class lowerCamelCase_ ( __snake_case ):
"""simple docstring"""
a_ =["""pixel_values"""]
def __init__( self : Union[str, Any] , _a : bool = True , _a : Dict[str, int] = None , _a : PILImageResampling = PILImageResampling.BICUBIC , _a : bool = True , _a : Dict[str, int] = None , _a : bool = True , _a : Union[int, float] = 1 / 255 , _a : bool = True , _a : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _a : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_a : List[str] , ) -> Optional[Any]:
super().__init__(**_SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[str] = size if size is not None else {'shortest_edge': 224}
__lowerCamelCase : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCamelCase : Dict = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__lowerCamelCase : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size' )
__lowerCamelCase : int = do_resize
__lowerCamelCase : Optional[int] = size
__lowerCamelCase : Any = resample
__lowerCamelCase : Tuple = do_center_crop
__lowerCamelCase : Optional[int] = crop_size
__lowerCamelCase : Any = do_rescale
__lowerCamelCase : str = rescale_factor
__lowerCamelCase : Union[str, Any] = do_normalize
__lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCamelCase : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _lowercase ( self : List[str] , _a : np.ndarray , _a : Dict[str, int] , _a : PILImageResampling = PILImageResampling.BICUBIC , _a : Optional[Union[str, ChannelDimension]] = None , **_a : Dict , ) -> Optional[int]:
__lowerCamelCase : List[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCamelCase : List[Any] = int((256 / 224) * size['shortest_edge'] )
__lowerCamelCase : Dict = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCamelCase : Optional[int] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
_SCREAMING_SNAKE_CASE , size=(size_dict['height'], size_dict['width']) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowercase ( self : List[Any] , _a : np.ndarray , _a : Dict[str, int] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : Dict , ) -> Tuple:
__lowerCamelCase : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size['height'], size['width']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowercase ( self : Any , _a : np.ndarray , _a : Union[int, float] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : str , ) -> Optional[Any]:
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[int] , _a : np.ndarray , _a : Union[float, List[float]] , _a : Union[float, List[float]] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : Dict , ) -> int:
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowercase ( self : int , _a : ImageInput , _a : Optional[bool] = None , _a : Optional[Dict[str, int]] = None , _a : PILImageResampling = None , _a : Optional[bool] = None , _a : Optional[Dict[str, int]] = None , _a : Optional[bool] = None , _a : Optional[float] = None , _a : Optional[bool] = None , _a : Optional[Union[float, Iterable[float]]] = None , _a : Optional[Union[float, Iterable[float]]] = None , _a : Optional[TensorType] = None , _a : ChannelDimension = ChannelDimension.FIRST , **_a : int , ) -> Tuple:
__lowerCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase : Union[str, Any] = resample if resample is not None else self.resample
__lowerCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase : Optional[int] = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase : int = image_std if image_std is not None else self.image_std
__lowerCamelCase : List[str] = size if size is not None else self.size
__lowerCamelCase : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCamelCase : str = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase : str = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size' )
__lowerCamelCase : Tuple = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCamelCase : int = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
__lowerCamelCase : int = [self.resize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
__lowerCamelCase : Dict = [self.center_crop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
__lowerCamelCase : Dict = [self.rescale(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
__lowerCamelCase : Optional[int] = [self.normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
__lowerCamelCase : List[Any] = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
__lowerCamelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
| 459
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ : str = logging.get_logger(__name__)
def lowercase__ ( _UpperCamelCase) -> int:
"""simple docstring"""
UpperCamelCase = torch.load(_UpperCamelCase , map_location='cpu')
if "model" in sd.keys():
UpperCamelCase = torch.load(_UpperCamelCase , map_location='cpu')['model']
# pop unnecessary weights
UpperCamelCase = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(_UpperCamelCase)
UpperCamelCase = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCamelCase = sd.pop(_UpperCamelCase)
UpperCamelCase = list(sd.keys())
for key in keys:
if ".qkv_proj." in key:
UpperCamelCase = sd[key]
# We split QKV in separate Q,K,V
UpperCamelCase = key.replace('.qkv_proj.' , '.q_proj.')
UpperCamelCase = key.replace('.qkv_proj.' , '.k_proj.')
UpperCamelCase = key.replace('.qkv_proj.' , '.v_proj.')
UpperCamelCase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCamelCase , UpperCamelCase , UpperCamelCase = torch.split(_UpperCamelCase , depth // 3 , dim=0)
UpperCamelCase = q
UpperCamelCase = k
UpperCamelCase = v
del sd[key]
return sd
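# The split above in isolation (a sketch with dummy shapes): a fused (3*d, d)
# qkv projection matrix becomes three (d, d) matrices along dim 0.
_fused = torch.randn(12, 4)
_q, _k, _v = torch.split(_fused, _fused.shape[0] // 3, dim=0)
assert _q.shape == _k.shape == _v.shape == (4, 4)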
@torch.no_grad()
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = load_checkpoint(_UpperCamelCase)
if config is not None:
UpperCamelCase = OPTConfig.from_pretrained(_UpperCamelCase)
else:
UpperCamelCase = OPTConfig()
UpperCamelCase = OPTModel(_UpperCamelCase).half().eval()
model.load_state_dict(_UpperCamelCase)
# Check results
Path(_UpperCamelCase).mkdir(exist_ok=_UpperCamelCase)
model.save_pretrained(_UpperCamelCase)
if __name__ == "__main__":
__magic_name__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__magic_name__ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 280
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase(self : Dict ) -> Dict:
snake_case = inspect.getfile(accelerate.test_utils )
snake_case = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
snake_case = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
snake_case = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def UpperCAmelCase(self : List[Any] ) -> int:
print(f'Found {torch.cuda.device_count()} devices.' )
snake_case = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase(self : Any ) -> Optional[Any]:
print(f'Found {torch.cuda.device_count()} devices.' )
snake_case = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(f'Command: {cmd}' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase(self : Union[str, Any] ) -> Dict:
snake_case = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase(self : List[str] ) -> Dict:
print(f'Found {torch.cuda.device_count()} devices, using 2 devices only' )
snake_case = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
_A = Accelerator()
_A = (accelerator.state.process_index + 2, 10)
_A = torch.randint(0, 10, shape).to(accelerator.device)
_A = ""
_A = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_A = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_A = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
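# What the script above exercises (a sketch): pad_across_processes zero-pads each
# process's tensor along dim 0 up to the largest size found on any process; with
# pad_first=True the zeros go in front of the data instead of after it.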
| 294
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
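
# A quick sketch of what the lazy structure above buys us (assumes `transformers` is
# installed): importing the package itself is cheap, and the framework-specific
# submodules are only loaded on first attribute access, e.g.
#
#   from transformers.models.wav2vec2 import Wav2Vec2Config  # triggers the real import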
from math import sqrt
def is_prime(number: int) -> bool:
    """Checks whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10_001) -> int:
    """Returns the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
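
# Quick sanity check (a sketch): solution(6) == 13, since the first six primes are
# 2, 3, 5, 7, 11, 13.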
if __name__ == "__main__":
print(F"""{solution() = }""")
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either condition is true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
F'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Builds configs and dummy inputs for the TF Funnel model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a set of tokens; short snippets are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
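
# A minimal usage sketch (hypothetical toy data; assumes `datasets`, `datasketch`
# and `dpu_utils` are installed):
#
#   ds = Dataset.from_dict({"content": [...], "repo_name": [...], "path": [...]})
#   ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)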
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Calculate the buoyant force on an object submerged in a fluid:
    force = fluid_density * gravity * displaced volume.
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
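
# Worked example (a sketch; ~1000 kg/m^3 for fresh water is an assumed round figure):
# a fully submerged 0.5 m^3 object displaces 1000 * 9.80665 * 0.5 ~= 4903.3 N.
#   archimedes_principle(fluid_density=1000, volume=0.5)  # ~= 4903.325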
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest) weight vector for a sample by squared distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull the winning weight vector `j` towards the training sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of
    objects and their classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a tensor [xmin, ymin, xmax, ymax] into a dict {"xmin": xmin, ...}."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
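
# Minimal usage sketch (the checkpoint download and image URL are illustrative
# assumptions, not part of this module):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)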
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys


def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im


@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into our VisionEncoderDecoderModel structure.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
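# --- Example (not part of the original test file; a minimal sketch) -----------------
# How the RoPE scaling exercised by `test_model_rope_scaling` is enabled in practice:
# the `rope_scaling` dict is set on the config before the model is instantiated.
# The checkpoint name below is illustrative; any GPT-NeoX checkpoint works the same way.
if __name__ == "__main__":
    from transformers import GPTNeoXConfig, GPTNeoXModel

    config = GPTNeoXConfig.from_pretrained("EleutherAI/pythia-410m-deduped")
    config.rope_scaling = {"type": "linear", "factor": 2.0}  # "dynamic" is the other supported type
    model = GPTNeoXModel(config)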
| 70
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost])
        adjancency[nodeb].append([nodea, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 70
| 1
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 359
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
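# --- Usage sketch (not part of the original file; the `params` values are illustrative).
# `batch_sequences` doubles as the DataLoader collate_fn: it pads each batch to its
# longest sequence and returns (token_ids, lengths) tensors.
if __name__ == "__main__":
    from argparse import Namespace

    from torch.utils.data import DataLoader

    params = Namespace(
        max_model_input_size=512,
        mlm=False,
        special_tok_ids={"bos_token": 0, "eos_token": 2, "unk_token": 3, "pad_token": 1},
        is_master=True,
    )
    # Toy sequences framed by bos (0) and eos (2); each is longer than 11 tokens so
    # none are dropped by `remove_empty_sequences`. Real data would be ragged.
    data = [np.concatenate(([0], np.arange(5, 5 + 14), [2])) for _ in range(3)]
    dataset = LmSeqsDataset(params=params, data=data)
    loader = DataLoader(dataset, batch_size=2, collate_fn=dataset.batch_sequences)
    for token_ids, lengths in loader:
        print(token_ids.shape, lengths)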
| 654
| 0
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
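# --- Usage sketch (not part of the original file) -----------------------------------
# How the helpers chain together: measure around a workload, then pretty-print the
# deltas. The matmul is a stand-in for any real workload (e.g. a model forward pass);
# like the helpers above, this assumes a CUDA device is available.
if __name__ == "__main__":
    start_measures = start_measure()
    x = torch.randn(1024, 1024)
    y = x @ x  # workload to profile
    measures = end_measure(start_measures)
    log_measures(measures, "matmul benchmark")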
| 706
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()

    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 108
| 0
|
import math
def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
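# --- Example (not part of the original file): a quick check of the hybrid sort above.
# Introsort runs quicksort, falls back to insertion sort on small slices, and to heap
# sort once the depth limit (2 * ceil(log2(n))) is exhausted.
assert sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) == [
    1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79,
]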
| 105
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 25
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 184
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list.
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
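# --- Example (not part of the original file) -----------------------------------------
# The recurrence keeps two running values: the best sum that includes the current
# element and the best that excludes it. A couple of hand-checked cases:
assert maximum_non_adjacent_sum([1, 2, 3]) == 4  # 1 + 3
assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18  # 5 + 7 + 6
assert maximum_non_adjacent_sum([]) == 0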
| 184
| 1
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(self, input_ids, prefix_embeds, attention_mask=None, labels=None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size, device):
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        eos_token_id=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        stop_token: Optional[str] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(tokens)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 499
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
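# --- Usage sketch (not part of the original file) ------------------------------------
# A tiny encode -> quantize -> decode round trip with the default (small) config.
# Shapes are illustrative; the default single-block VQModel keeps the spatial
# resolution and maps 3 input channels to 3 latent channels.
if __name__ == "__main__":
    model = VQModel()
    sample = torch.randn(1, 3, 32, 32)
    latents = model.encode(sample).latents
    reconstruction = model(sample).sample
    print(latents.shape, reconstruction.shape)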
| 4
| 0
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
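# --- Example invocation (not part of the original script; the script name and output
# --- path are illustrative) ----------------------------------------------------------
# python convert_musicgen_transformers.py \
#     --checkpoint small \
#     --pytorch_dump_folder ./musicgen-small \
#     --device cpu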
| 683
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed.")

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager.")
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda.")
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ")
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'.")

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            })
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            })

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs, )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None, ):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    param_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, param_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            " We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, "
            f"but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check whether any `bnb.nn.Linear4bit` layer is present inside the model.
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
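# Quick reference for the scheduler API exercised by the tests above: a
# minimal denoising loop with `DDPMScheduler`. The random tensor stands in
# for a real UNet prediction; shapes are illustrative only.
if __name__ == "__main__":
    scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    scheduler.set_timesteps(num_inference_steps=50)
    sample = torch.randn(1, 3, 8, 8)  # stand-in for a latent/image
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # a real model prediction would go here
        sample = scheduler.step(model_output, t, sample).prev_sample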
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit place
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
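# A concrete check of the sort above (non-negative integers only); this demo
# is an illustrative addition, not part of the original module.
if __name__ == "__main__":
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
    # -> [2, 24, 45, 66, 75, 90, 170, 802]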
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
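# Worked example for the DP above (illustrative addition): a subset of
# [3, 34, 4, 12, 5, 2] sums to 9 (4 + 5), but no subset sums to 30.
if __name__ == "__main__":
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False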
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timm_backbone'] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
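# Sketch of the user-facing effect of the lazy module above (assuming the
# usual `transformers.models.*` package layout): importing the config is
# cheap, and torch is only needed once the torch-gated branch is resolved.
#
#   from transformers.models.timm_backbone import TimmBackboneConfig  # lightweight
#   config = TimmBackboneConfig()
#   from transformers.models.timm_backbone import TimmBackbone  # requires torch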
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
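# Hypothetical follow-up (not part of the script): once converted, the
# image-variation pipeline can be reloaded from the dump path like any other
# diffusers pipeline. The path below is a placeholder for args.dump_path.
#
#   from diffusers import UnCLIPImageVariationPipeline
#   pipe = UnCLIPImageVariationPipeline.from_pretrained("path/to/dump")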
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score how similar the item is to the target by counting matches per position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice and combine the two parent strings at a random point.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    # Mutate a random gene of the child with another one from the list.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
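# Tiny demonstration of the helpers in isolation (illustrative addition, using
# the names fixed above). crossover's output depends on the random slice point.
#
#   item, score = evaluate("Hellx Worll", "Hello World")  # score == 9.0 (9 of 11 chars match)
#   child_1, child_2 = crossover("AAAAAAA", "BBBBBBB")
#   # child_1 is a prefix of the first parent plus a suffix of the second, e.g. "AAABBBB"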
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
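# For orientation (illustrative, with a placeholder sqlite path): these two
# classes back the public `Dataset.to_sql` / `Dataset.from_sql` entry points.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   ds.to_sql("my_table", "sqlite:///data.sqlite")               # uses SqlDatasetWriter
#   ds2 = Dataset.from_sql("my_table", "sqlite:///data.sqlite")  # uses SqlDatasetReader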
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """
UpperCamelCase : List[Any] = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args: Namespace):
    """Factory used by the `datasets-cli convert` entry point."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
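# To make the substitution table concrete: applying the TO_CONVERT rules in
# order rewrites a hypothetical TFDS line into its datasets equivalent.
if __name__ == "__main__":
    line = "tfds.features.Text()"
    for pattern, replacement in TO_CONVERT:
        line = re.sub(pattern, replacement, line)
    print(line)  # datasets.Value('string')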
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules and attributes intact."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
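# Minimal sketch of the patcher in action (illustrative; `some_module` is a
# placeholder for any module that did `import os` at its top level).
#
#   def mock_join(*args):
#       return ":".join(args)
#
#   with patch_submodule(some_module, "os.path.join", mock_join):
#       ...  # inside the block, some_module sees the mocked join
#   # on exit, the original attribute is restored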
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
"""simple docstring"""
def climbing_stairs(number_of_steps: int) -> int:
    """
    Count the distinct ways to climb `number_of_steps` steps, taking 1 or 2
    steps at a time (LeetCode 70; a Fibonacci recurrence).
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
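# Usage sketch (illustrative addition): there are three ways to climb 3 steps
# (1+1+1, 1+2, 2+1) and five ways to climb 4.
if __name__ == "__main__":
    print(climbing_stairs(3))  # 3
    print(climbing_stairs(4))  # 5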
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
def lowercase__(A , A ) ->Dict:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a : int = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a : Tuple = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a : List[str] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowercase__(A , A ) ->int:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : List[str] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a : int = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Union[str, Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
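# Worked example of the acceptance rule used above (added for illustration):
# at temperature T = 100, a move that worsens the score by 10 is still accepted
# with probability e^(change / T) = e^(-10 / 100) ~= 0.905; at T = 1 the same
# move survives with probability e^(-10) ~= 4.5e-5. Cooling therefore turns the
# early random walk into near-pure hill climbing as the temperature drops.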
| 218
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
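# Illustrative note (not part of the module): with the lazy pattern above, an
# import such as
#
#     from transformers.models.chinese_clip import ChineseCLIPProcessor
#
# only loads `processing_chinese_clip` at first attribute access, so importing
# the package stays cheap even when torch or vision extras are missing.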
| 703
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: opposite signs of equation() at a and b guarantee a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
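# For context (added note): equation(x) = 10 - x * x has roots at x = ±sqrt(10)
# ≈ ±3.1623, so both calls above should print a value near 3.16, to within the
# 0.01 interval-width tolerance of the loop.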
| 144
| 0
|
class Node:
    def __init__(self, data: int, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value: int) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    """
    >>> new_linked_list = LinkedList()
    >>> new_linked_list.is_empty()
    True
    >>> new_linked_list.insert(10)
    >>> new_linked_list.insert(20)
    >>> new_linked_list.insert_at_position(position=2, value=15)
    >>> str(new_linked_list)
    '10 15 20'
    >>> 15 in new_linked_list
    True
    >>> new_linked_list.delete_value(15)
    >>> str(new_linked_list)
    '10 20'
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
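# Design note (added for clarity): keeping both `head` and `tail` pointers makes
# insert() an O(1) append via insert_after_node(self.tail, ...), while
# insert_at_position() and delete_value() still walk the list and remain O(n).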
| 14
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
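# Typical invocation (illustrative; assumes a folder of `<class>_<idx>.jpg`
# files such as the Oxford-IIIT Pets images):
#
#   accelerate launch cv_example.py --data_dir ./images --mixed_precision fp16
#
# `accelerate launch` supplies the device and distributed configuration, so the
# same script runs unchanged on CPU, a single GPU, multiple GPUs, or TPU.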
| 623
| 0
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently
    process a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply
    each [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    r"""
    [`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution).
    """

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    r"""
    [`FlaxLogitsWarper`] that performs nucleus (top-p) filtering: restricts sampling to the smallest set of
    tokens whose cumulative probability reaches `top_p`.
    """

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    r"""
    [`FlaxLogitsWarper`] that performs top-k filtering: restricts sampling to the k highest probability tokens.
    """

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token.
    """

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is
    reached.
    """

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] enforcing a min-length by setting the EOS probability to 0.
    """

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts.
    """

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step.
    """

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that takes a mapping from generation indices to token indices and forces the
    corresponding token at each mapped position.
    """

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    r"""
    Whisper-specific processor that constrains when timestamp tokens may be sampled.
    """

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
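# Minimal sketch of how these processors compose during sampling (illustrative
# shapes; the dummy logits below are not from a real model):
#
#   processors = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=3)]
#   )
#   scores = processors(input_ids, logits, cur_len=cur_len)
#
# After the call, every token outside the top 3 sits at -inf, so a categorical
# sample over `scores` can only select from the top-k set.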
| 712
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
# stages
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
))
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
))
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
))
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
))
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
))
# 3 convs
for i in range(3):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
))
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
))
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
))
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
))
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
))
# fmt: on
for i in range(config.encoder_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
))
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
])
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our DETR structure.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
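# Example invocation (illustrative; the script name is whatever this file is
# saved as, and the original weights are downloaded from torch hub on first run):
#
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50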
| 221
| 0
|
"""simple docstring"""
def A_ ( snake_case_ : int ):
'''simple docstring'''
UpperCamelCase : List[Any] = generate_pascal_triangle(snake_case_ )
for row_idx in range(snake_case_ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=""" """ )
else:
print(triangle[row_idx][col_idx] ,end="""""" )
print()
def A_ ( snake_case_ : int ):
'''simple docstring'''
if not isinstance(snake_case_ ,snake_case_ ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
UpperCamelCase : list[list[int]] = []
for current_row_idx in range(snake_case_ ):
UpperCamelCase : List[str] = populate_current_row(snake_case_ ,snake_case_ )
triangle.append(snake_case_ )
return triangle
def A_ ( snake_case_ : list[list[int]] ,snake_case_ : int ):
'''simple docstring'''
UpperCamelCase : int = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
UpperCamelCase , UpperCamelCase : List[Any] = 1, 1
for current_col_idx in range(1 ,snake_case_ ):
calculate_current_element(
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
return current_row
def A_ ( snake_case_ : list[list[int]] ,snake_case_ : list[int] ,snake_case_ : int ,snake_case_ : int ,):
'''simple docstring'''
UpperCamelCase : Optional[Any] = triangle[current_row_idx - 1][current_col_idx - 1]
UpperCamelCase : str = triangle[current_row_idx - 1][current_col_idx]
UpperCamelCase : Dict = above_to_left_elt + above_to_right_elt
def A_ ( snake_case_ : int ):
'''simple docstring'''
if not isinstance(snake_case_ ,snake_case_ ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
UpperCamelCase : list[list[int]] = [[1]]
for row_index in range(1 ,snake_case_ ):
UpperCamelCase : Union[str, Any] = [0] + result[-1] + [0]
UpperCamelCase : int = row_index + 1
# Calculate the number of distinct elements in a row
UpperCamelCase : Optional[Any] = sum(divmod(snake_case_ ,2 ) )
UpperCamelCase : Any = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
UpperCamelCase : str = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
UpperCamelCase : Optional[Any] = row_first_half + row_second_half
result.append(snake_case_ )
return result
def A_ ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(snake_case_ : Callable ,snake_case_ : int ) -> None:
UpperCamelCase : Optional[int] = f'{func.__name__}({value})'
UpperCamelCase : Optional[Any] = timeit(f'__main__.{call}' ,setup="""import __main__""" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(snake_case_ ,snake_case_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
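# Sample output (added note): generate_pascal_triangle(5)[-1] and
# generate_pascal_triangle_optimized(5)[-1] both equal [1, 4, 6, 4, 1]; the
# benchmark() call above only compares their running times, not their results.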
| 499
|
"""simple docstring"""
from __future__ import annotations
from math import gcd
def A_ ( snake_case_ : int ,snake_case_ : int = 2 ,snake_case_ : int = 1 ,snake_case_ : int = 3 ,):
'''simple docstring'''
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(snake_case_ : int ,snake_case_ : int ,snake_case_ : int ) -> int:
return (pow(snake_case_ ,2 ) + step) % modulus
for _ in range(snake_case_ ):
# These track the position within the cycle detection logic.
UpperCamelCase : Optional[Any] = seed
UpperCamelCase : str = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
UpperCamelCase : int = rand_fn(snake_case_ ,snake_case_ ,snake_case_ )
UpperCamelCase : Dict = rand_fn(snake_case_ ,snake_case_ ,snake_case_ )
UpperCamelCase : Union[str, Any] = rand_fn(snake_case_ ,snake_case_ ,snake_case_ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
UpperCamelCase : str = gcd(hare - tortoise ,snake_case_ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
UpperCamelCase : Optional[int] = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
__A : Any = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
__A : Optional[int] = parser.parse_args()
__A : Optional[int] = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
__A : Optional[Any] = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
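# A hedged usage sketch (not part of the original script). It assumes the
# ``pollard_rho(num, seed=2, step=1, attempts=3)`` signature suggested by the
# argparse defaults above; 8051 = 83 * 97 is the textbook composite, and either
# factor may be returned depending on the seed and step actually chosen.
#
# factor = pollard_rho(8051)
# assert factor is not None and 8051 % factor == 0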
| 499
| 1
|
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of ``input_list``, sorted in ascending order."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets the values of the modes: every element whose count equals the maximum.
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
if __name__ == "__main__":
import doctest
doctest.testmod()
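# Quick illustrative checks (hypothetical, not from the original file):
#
# mode([2, 3, 4, 5, 3, 4])  -> [3, 4]   (two values tied for the highest count)
# mode([])                  -> []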
| 700
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A : List[str] = logging.get_logger(__name__)
class lowerCAmelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to the height and width given in ``size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to the height and width given in ``size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale ``image`` pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with the given per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured resize/crop/rescale/normalize steps to ``images``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
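# A minimal usage sketch (not part of the original file), assuming the class
# above (kept under its original name) is instantiated with its defaults. The
# input is a hypothetical HWC uint8 array; the expected output shape follows
# from the 256-resize plus 224 center-crop configured in ``__init__``.
#
# import numpy as np
# processor = lowerCAmelCase()
# image = np.random.randint(0, 256, (300, 300, 3), dtype=np.uint8)
# batch = processor.preprocess(image, return_tensors="np")
# print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)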
| 304
| 0
|
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split ``x`` into sentences, one per line (undoes the Pegasus ``<n>`` marker)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 87
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 201
| 0
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Assigns a label (and score) to an input image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 721
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)


MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / '''model_card_template.md'''
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the current environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    """Prefix ``model_id`` with the username or organization that will own the repo."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name: str) -> None:
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.'
        )

    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en', license='apache-2.0', library_name='diffusers', tags=[],
            datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from the resolved path of a cached file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(R'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
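# A small sanity sketch (hypothetical cache path, not from the original file),
# assuming REGEX_COMMIT_HASH accepts a 40-character lowercase hex string:
#
# path = "~/.cache/huggingface/hub/models--u--m/snapshots/" + "a" * 40 + "/model.bin"
# assert extract_commit_hash(path) == "a" * 40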
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
old_diffusers_cache = os.path.join(hf_cache_home, '''diffusers''')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.'
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Insert ``variant`` before the file extension, e.g. ``model.bin`` -> ``model.fp16.bin``."""
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)

    return weights_name
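# Behavior checks derived directly from the function above (filenames illustrative):
#
# assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
# assert _add_variant("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"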
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                    resume_download=resume_download, local_files_only=local_files_only,
                    use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}\' so that the correct variant file can be added.''' , FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir,
                force_download=force_download, proxies=proxies, resume_download=resume_download,
                local_files_only=local_files_only, use_auth_token=use_auth_token,
                user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'this model name. Check the model page at '
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
| 505
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = '''mvp'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self, vocab_size=50_267, max_position_embeddings=10_24, encoder_layers=12, encoder_ffn_dim=40_96,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=40_96, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=10_24,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False,
        prompt_length=1_00, prompt_mid_dim=8_00, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed.")
| 631
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        """Deprecated alias that forwards everything to DonutImageProcessor."""
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 5
| 0
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__lowerCamelCase : Optional[int] = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0_0_0_1)
        return model, optimizer
@slow
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
__magic_name__ : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
__magic_name__ : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='''linear''' , clip_sample=lowerCamelCase_ , )
__magic_name__ : Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='''linear''' , clip_sample=lowerCamelCase_ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
__magic_name__ : str = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowerCamelCase_ ) for _ in range(4 )]
__magic_name__ : int = [torch.randn((4, 3, 32, 32) ).to(lowerCamelCase_ ) for _ in range(4 )]
__magic_name__ : Union[str, Any] = [torch.randint(0 , 1000 , (4,) ).long().to(lowerCamelCase_ ) for _ in range(4 )]
# train with a DDPM scheduler
__magic_name__ : Optional[int] = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCamelCase_ )
for i in range(4 ):
optimizer.zero_grad()
__magic_name__ : Optional[int] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
__magic_name__ : Any = model(lowerCamelCase_ , timesteps[i] ).sample
__magic_name__ : List[Any] = torch.nn.functional.mse_loss(lowerCamelCase_ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
__magic_name__ : List[str] = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCamelCase_ )
for i in range(4 ):
optimizer.zero_grad()
__magic_name__ : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
__magic_name__ : str = model(lowerCamelCase_ , timesteps[i] ).sample
__magic_name__ : List[str] = torch.nn.functional.mse_loss(lowerCamelCase_ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5 ) )
| 702
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('''DataClass''', Any)
DataClassType = NewType('''DataClassType''', Any)
def string_to_bool(v):
    """Parse common truthy/falsy strings into a bool (pass bools through)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map string representations back to the original choice objects."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases: Union[str, List[str]] = None, help: str = None, default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING, metadata: dict = None, **kwargs,
):
    """Convenience wrapper around ``dataclasses.field`` that stores argparse hints in ``metadata``."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """ArgumentParser that builds CLI arguments from dataclass type hints."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = F'''--{field.name}'''
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )

        aliases = kwargs.pop('''aliases''', [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, '''__origin__''', field.type)
        if origin_type is Union or (hasattr(types, '''UnionType''') and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    F''' Problem encountered in field \'{field.name}\'.''' )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, '''__origin__''', field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, '''__origin__''', field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]

            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''])

            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(F'''--no_{field.name}''', action='''store_false''', dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, '''_argument_group_name'''):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = '''.'''.join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''' ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix('''.args'''))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action='''append''')

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip('''-'''), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''')
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}''')
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding='''utf-8''') as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
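# A hedged end-to-end sketch (the dataclass and arguments are illustrative, not
# from the original file): fields become CLI flags, and a bool field that
# defaults to True gets an automatic `--no_*` complement.
#
# @dataclasses.dataclass
# class TrainingConfig:
#     learning_rate: float = 3e-4
#     use_cuda: bool = True
#
# parser = HfArgumentParser(TrainingConfig)
# (cfg,) = parser.parse_args_into_dataclasses(["--learning_rate", "0.01", "--no_use_cuda"])
# assert cfg.learning_rate == 0.01 and cfg.use_cuda is False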
| 501
| 0
|
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment, falling back to ``default``."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''')
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that unconditionally skips a test."""
    return unittest.skip("""Test was skipped""")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set."""
    return unittest.skipUnless(_run_slow_tests, """test is slow""")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), """test requires only a CPU""")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), """test requires a GPU""")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), """test requires a XPU""")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), """test requires a `mps` backend support in `torch`""")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), """test requires the Hugging Face suite""")(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), """test requires the bitsandbytes library""")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), """test requires TPU""")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, """test requires a GPU""")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, """test requires a XPU""")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, """test requires multiple GPUs""")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, """test requires multiple XPUs""")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), """test requires safetensors""")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), """test requires DeepSpeed""")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(""">=""", """1.12.0"""), """test requires torch version >= 1.12.0""")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(""">=""", version), F'''test requires torch version >= {version}''')(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), """test requires Tensorboard""")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), """test requires wandb""")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), """test requires comet_ml""")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        """test requires at least one tracker to be available and for `comet_ml` to not be installed""",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """TestCase that creates a shared temp dir for the class and wipes it between tests."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("""**/*"""):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        """Register mocks that are started immediately and stopped automatically on cleanup."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Check that ``tensor`` holds the same values on every process."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("""\nRunning: """, """ """.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("""utf-8""").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="""stdout:"""))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="""stderr:"""))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=1_80, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = """ """.join(cmd)
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr)
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''')

    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """Run ``command`` with subprocess.check_output; optionally return decoded stdout."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, """decode"""):
                output = output.decode("""utf-8""")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F'''Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}''') from e
| 567
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    """Sum the factorials of the digits of ``n``."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    """Project Euler 34: sum all numbers equal to the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1  # upper bound: even 8 * 9! is only a 7-digit number
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F"{solution() = }")
| 567
| 1
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random nested list of floats with the given 2D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, min_seq_length=4_0_0, max_seq_length=2_0_0_0, spectrogram_length=2_0_4_8,
        feature_size=1_2_8, num_audio_channels=1, hop_length=5_1_2, chunk_length=3_0, sampling_rate=4_4_1_0_0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
def lowerCamelCase__ ( self : str ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : int=False ):
def _flatten(UpperCamelCase_ : Union[str, Any] ):
return list(itertools.chain(*__A ) )
if equal_length:
lowerCAmelCase : Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase : List[str] = [np.asarray(__A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
from torch import nn
class ClassificationHead(nn.Module):
    """A single-layer classification head (the commented-out lines sketch a two-layer MLP variant)."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
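

# Minimal usage sketch (illustrative; the sizes are made up):
#     import torch
#     head = ClassificationHead(class_size=5, embed_size=768)
#     logits = head(torch.randn(2, 768))  # -> tensor of shape (2, 5)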
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""Convert TrOCR checkpoints from the unilm repository."""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
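

# Note: the original checkpoint stores the query/key/value projections as one fused `qkv`
# matrix of shape (3 * hidden_size, hidden_size); slicing it into thirds along dim 0
# recovers the separate q, k and v weights expected by the HF ViT layout.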
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our VisionEncoderDecoderModel structure.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
if "trocr-base-handwritten" in checkpoint_url:
_lowercase = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
_lowercase = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
_lowercase = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
_lowercase = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , A_ , atol=1e-3 ), "First elements of logits not as expected"
Path(A_ ).mkdir(exist_ok=A_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A_ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(A_ )
if __name__ == "__main__":
__magic_name__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__magic_name__ : List[Any] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
A_ : Optional[int] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is
                    # different to the input size. This will likely lead to incorrect results due to broadcasting.
                    # Please ensure they have the same size.", which is a symptom that something is wrong for the
                    # regression problem. See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak the downstream model's weights to the transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
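

# Closed forms for the two quantities (useful as a sanity check):
#     sum of squares    = n * (n + 1) * (2 * n + 1) / 6
#     square of the sum = (n * (n + 1) / 2) ** 2
# For n = 100 the difference is 25164150.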
if __name__ == "__main__":
print(F"""{solution() = }""")
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    r"""
    Constructs an OwlViT processor which wraps an image processor and a CLIP tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """
    Return a set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
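

# Example (illustrative): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.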
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split a string into BPE tokens."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
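

# Illustrative usage (a sketch; assumes local vocab/merges files trained for this model):
#     tokenizer = BlenderbotSmallTokenizer("vocab.json", "merges.txt")
#     input_ids = tokenizer("sam spends most of his time watching television")["input_ids"]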
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":

    def list_str(values):
        """Split a comma-separated string into a list."""
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
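
# The same status check can be done without shelling out to curl; a minimal
# sketch using requests (the endpoint mirrors the script above, the token is
# a placeholder with actions:read scope):
def fetch_runner_status(token):
    import requests  # local import: the script itself only needs the stdlib

    response = requests.get(
        "https://api.github.com/repos/huggingface/transformers/actions/runners",
        headers={"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"},
        timeout=30,
    )
    response.raise_for_status()
    return response.json()["runners"]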
| 403
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Map the original ResNet block parameters onto diffusers' ResnetBlock2D layout
    # (destination key names reconstructed to match diffusers' module attributes).
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # Split the fused qkv projection and map onto diffusers' attention layout
    # (destination key names reconstructed to match diffusers' module attributes).
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
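
# The chunk(3, dim=0) calls above split the fused qkv projection into equal
# thirds along the output dimension. A tiny self-contained illustration
# (shapes are arbitrary; not part of the conversion itself):
def _demo_qkv_chunk():
    qkv_weight = torch.randn(3 * 4, 4)  # fused projection: (3 * dim, dim)
    weight_q, weight_k, weight_v = qkv_weight.chunk(3, dim=0)  # three (dim, dim) tensors
    assert weight_q.shape == weight_k.shape == weight_v.shape == (4, 4)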
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = j == 0 and downsample_block_has_skip
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = j == 0 and downsample_block_has_skip
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()

    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
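
# After conversion, the saved pipeline can be reloaded and sampled like any
# other diffusers pipeline. A minimal sketch (the dump path is a placeholder;
# class-conditional checkpoints additionally accept class_labels):
def _demo_load_converted_pipeline(dump_path="./converted_cm"):
    pipe = ConsistencyModelPipeline.from_pretrained(dump_path)
    pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    # Single-step consistency sampling.
    image = pipe(num_inference_steps=1).images[0]
    image.save("sample.png")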
| 76
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
        NOTE: Perplexity can only be calculated for causal language models.
        This includes models such as gpt2, causal variations of bert,
        causal versions of t5, and more (the full list can be found
        in the AutoModelForCausalLM documentation here:
        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 114
| 0
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
                 bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
                         bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Build model inputs from a conversation by concatenating turns with EOS."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
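
# A quick usage sketch (downloads the tokenizer files from the Hub on first use):
def _demo_gpt_neox_tokenizer():
    tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
    ids = tok("Hello world")["input_ids"]
    return tok.decode(ids)  # "Hello world"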
| 708
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
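
# _LazyModule defers the actual submodule imports until first attribute
# access, which keeps importing the package cheap. A minimal stand-in
# sketching the idea (the real implementation lives in transformers.utils):
import importlib
import types


class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value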
| 0
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True,
                 keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>",
                 pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
                 additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
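
# Note the XLNet convention in build_inputs_with_special_tokens above: <sep>
# and <cls> are appended at the end of the sequence rather than the start.
# Illustration with placeholder token ids:
def _demo_xlnet_special_token_layout():
    token_ids_a, token_ids_b = [10, 11, 12], [20, 21]
    sep, cls = [5], [3]  # placeholder ids for <sep> and <cls>
    single = token_ids_a + sep + cls  # [10, 11, 12, 5, 3]
    pair = token_ids_a + sep + token_ids_b + sep + cls  # [10, 11, 12, 5, 20, 21, 5, 3]
    return single, pair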
| 562
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 562
| 1
|
def mean_absolute_deviation(nums: list[int]) -> float:
    """
    Return the mean absolute deviation of a list of numbers.

    >>> mean_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 526
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
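
# A quick sketch: instantiate the default config and inspect the dynamic ONNX
# input axes defined above (the attribute values reflect the defaults):
def _demo_albert_onnx_inputs():
    config = AlbertConfig()  # hidden_size=4096, num_hidden_layers=12, ...
    onnx_config = AlbertOnnxConfig(config)
    return onnx_config.inputs  # OrderedDict of input name -> {axis: symbol}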
| 526
| 1
|
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
UpperCamelCase__ :str = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__,  # dict built in the fmt: off block above
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 45
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory used to instantiate the training command from command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command's arguments on the root transformers-cli parser."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")

        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 673
| 0
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the list from both ends to find the index of the key.

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    >>> search([5], 5)
    0
    >>> search([], 1)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
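

# The recursion above touches one element from each end per call, so the
# worst case is O(n) time and O(n) call depth. An equivalent iterative
# two-pointer version avoids the recursion stack:
def search_iterative(list_data: list, key: int) -> int:
    """
    >>> search_iterative([1, 2, 4, 5, 3], 4)
    2
    >>> search_iterative([1, 2, 4, 5, 3], 6)
    -1
    """
    left, right = 0, len(list_data) - 1
    while left <= right:
        if list_data[left] == key:
            return left
        if list_data[right] == key:
            return right
        left += 1
        right -= 1
    return -1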
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667
| 1
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    prefix = f"vision_model.encoder.layers.{layer_num}.message_attn"
                else:
                    prefix = f"vision_model.encoder.layers.{layer_num}.self_attn"
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                prefix = f"mit.encoder.layers.{layer_num}.self_attn"
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                prefix = f"text_model.encoder.layers.{layer_num}.self_attn"

            # split the fused qkv projection into separate q/k/v tensors; slicing
            # the first dimension works for both weights (2D) and biases (1D)
            # (destination names reconstructed to follow the HF X-CLIP module layout)
            suffix = "weight" if "weight" in key else "bias"
            orig_state_dict[f"{prefix}.q_proj.{suffix}"] = val[:dim]
            orig_state_dict[f"{prefix}.k_proj.{suffix}"] = val[dim : dim * 2]
            orig_state_dict[f"{prefix}.v_proj.{suffix}"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCamelCase__ :Optional[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCamelCase__ :Optional[int] = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] )
elif model_name == "xclip-base-patch16":
UpperCamelCase__ :Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCamelCase__ :str = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] )
elif model_name == "xclip-large-patch14":
UpperCamelCase__ :Dict = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCamelCase__ :str = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCamelCase__ :List[str] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCamelCase__ :int = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCamelCase__ :str = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCamelCase__ :Any = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCamelCase__ :List[Any] = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCamelCase__ :Dict = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCamelCase__ :Any = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCamelCase__ :Optional[int] = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCamelCase__ :List[str] = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCamelCase__ :Dict = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCamelCase__ :int = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCamelCase__ :Optional[Any] = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(lowercase__ , organization="""nielsr""" )
processor.push_to_hub(lowercase__ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(lowercase__ , organization="""nielsr""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
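
# Once converted (or using a published checkpoint), zero-shot video
# classification follows the usual CLIP pattern. A sketch; the model id is a
# published X-CLIP checkpoint and prepare_video is the helper defined above:
def _demo_zero_shot_video_classification():
    model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
    processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
    video = prepare_video(8)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )
    with torch.no_grad():
        probs = model(**inputs).logits_per_video.softmax(dim=1)
    return probs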
| 45
|
def is_palindrome(num: int) -> bool:
    """
    Return True if num reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(123)
    False
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
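

# A string-based equivalent of the digit-reversal check above, often the
# simpler way to express the same test:
def is_palindrome_str(num: int) -> bool:
    """
    >>> is_palindrome_str(121)
    True
    >>> is_palindrome_str(-121)
    False
    """
    s = str(num)
    return num >= 0 and s == s[::-1]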
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
@slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 714
|
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings: the Jaro similarity plus a bonus
    of 0.1 * prefix_len * (1 - jaro) for a common prefix of up to 4 characters.

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "world")
    0.4365079365079365
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched again
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 442
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class lowerCAmelCase__(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , a = True , a = None , a = PILImageResampling.BILINEAR , a = True , a = None , a = True , a = 1 / 2_55 , a = True , a = None , a = None , **a , ) -> None:
'''simple docstring'''
super().__init__(**a )
_UpperCamelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCamelCase = get_size_dict(a , default_to_square=a )
_UpperCamelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCamelCase = get_size_dict(a )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_UpperCamelCase = get_resize_output_image_size(a , size=size["""shortest_edge"""] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self , a , a , a = None , **a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(a )
return center_crop(a , size=(size["""height"""], size["""width"""]) , data_format=a , **a )
def A_ ( self , a , a , a = None , **a ) -> np.ndarray:
'''simple docstring'''
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self , a , a , a , a = None , **a , ) -> np.ndarray:
'''simple docstring'''
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(a , default_to_square=a )
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(a )
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(a ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=a , mean=a , std=a ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(a , a ) for image in images]
_UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a )
| 612
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
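
# Run sketch (added note): these tests read assets from
# digital_image_processing/image_data/, so they assume execution from the
# repository root, e.g.:
#
#     python -m pytest digital_image_processing/test_digital_image_processing.py -q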
| 410
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 227
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates a pair of train/validation `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer; fall back to DummyOptim when DeepSpeed supplies one
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
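
# Launch sketch (added illustration; the DeepSpeed config path is a hypothetical
# placeholder):
#
#     accelerate launch --config_file ds_zero2_config.yaml this_script.py \
#         --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .
#
# When the DeepSpeed config provides "optimizer"/"scheduler" sections, the code
# above deliberately instantiates DummyOptim/DummyScheduler so that DeepSpeed,
# not the script, owns those components.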
| 227
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
"""simple docstring"""
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Optional[Any] = tempfile.mkdtemp()
a__ : Any = BlipImageProcessor()
a__ : Optional[int] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
a__ : List[Any] = BlipaProcessor(lowercase , lowercase)
processor.save_pretrained(self.tmpdirname)
def __lowercase ( self , **lowercase) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).tokenizer
def __lowercase ( self , **lowercase) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
a__ : int = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ : Any = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__ : List[str] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : List[Any] = self.get_image_processor()
a__ : Tuple = self.get_tokenizer()
a__ : Optional[Any] = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase)
a__ : Union[str, Any] = self.prepare_image_inputs()
a__ : Any = image_processor(lowercase , return_tensors='np')
a__ : str = processor(images=lowercase , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Union[str, Any] = self.get_image_processor()
a__ : Any = self.get_tokenizer()
a__ : Optional[Any] = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase)
a__ : int = 'lower newer'
a__ : Union[str, Any] = processor(text=lowercase)
a__ : Tuple = tokenizer(lowercase , return_token_type_ids=lowercase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[Any] = self.get_image_processor()
a__ : List[Any] = self.get_tokenizer()
a__ : List[Any] = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase)
a__ : List[str] = 'lower newer'
a__ : Any = self.prepare_image_inputs()
a__ : Any = processor(text=lowercase , images=lowercase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase):
processor()
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : List[Any] = self.get_image_processor()
a__ : Optional[Any] = self.get_tokenizer()
a__ : List[Any] = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase)
a__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ : List[str] = processor.batch_decode(lowercase)
a__ : Tuple = tokenizer.batch_decode(lowercase)
self.assertListEqual(lowercase , lowercase)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = self.get_image_processor()
a__ : Dict = self.get_tokenizer()
a__ : str = BlipaProcessor(tokenizer=lowercase , image_processor=lowercase)
a__ : int = 'lower newer'
a__ : Union[str, Any] = self.prepare_image_inputs()
a__ : Any = processor(text=lowercase , images=lowercase)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 302
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowercase : int = get_tests_dir("""fixtures""")
class ImageProcessorUtilTester(unittest.TestCase):
"""simple docstring"""
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : List[str] = mock.Mock()
a__ : Any = 500
a__ : List[Any] = {}
a__ : List[str] = HTTPError
a__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
a__ : Dict = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=lowercase) as mock_head:
a__ : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Optional[Any] = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json')
def __lowercase ( self) -> int:
'''simple docstring'''
with self.assertRaises(lowercase):
# config is in subfolder, the following should not work without specifying the subfolder
a__ : Optional[int] = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')
a__ : int = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants' , subfolder='feature_extractor')
self.assertIsNotNone(lowercase)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
"""simple docstring"""
@classmethod
def __lowercase ( cls) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowercase)
@classmethod
def __lowercase ( cls) -> Optional[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-image-processor')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-image-processor-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-image-processor')
except HTTPError:
pass
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : str = ViTImageProcessor.from_pretrained(lowercase)
image_processor.push_to_hub('test-image-processor' , use_auth_token=self._token)
a__ : Dict = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
# Reset repo
delete_repo(token=self._token , repo_id='test-image-processor')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowercase , repo_id='test-image-processor' , push_to_hub=lowercase , use_auth_token=self._token)
a__ : List[str] = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : int = ViTImageProcessor.from_pretrained(lowercase)
image_processor.push_to_hub('valid_org/test-image-processor' , use_auth_token=self._token)
a__ : Any = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-image-processor')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowercase , repo_id='valid_org/test-image-processor-org' , push_to_hub=lowercase , use_auth_token=self._token)
a__ : int = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
def __lowercase ( self) -> List[str]:
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
a__ : List[Any] = CustomImageProcessor.from_pretrained(lowercase)
image_processor.push_to_hub('test-dynamic-image-processor' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'} , )
a__ : str = AutoImageProcessor.from_pretrained(
F'{USER}/test-dynamic-image-processor' , trust_remote_code=lowercase)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , 'CustomImageProcessor')
| 302
| 1
|
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
def lowerCAmelCase ( self : Optional[int] ):
__snake_case = 10
__snake_case = self.get_scheduler_config()
__snake_case = self.scheduler_classes[0](**snake_case_ )
scheduler.set_timesteps(snake_case_ )
__snake_case = scheduler.timesteps[0]
__snake_case = scheduler.timesteps[1]
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
__snake_case = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase ( self : int ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def lowerCAmelCase ( self : Union[str, Any] ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case_ )
def lowerCAmelCase ( self : Tuple ):
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**snake_case_ )
__snake_case = 1
scheduler.set_timesteps(snake_case_ )
__snake_case = scheduler.timesteps
__snake_case = torch.manual_seed(0 )
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case_ ):
# 1. scale model input
__snake_case = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
__snake_case = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
__snake_case = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
__snake_case = pred_prev_sample
__snake_case = torch.sum(torch.abs(snake_case_ ) )
__snake_case = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def lowerCAmelCase ( self : Optional[int] ):
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**snake_case_ )
__snake_case = [106, 0]
scheduler.set_timesteps(timesteps=snake_case_ )
__snake_case = scheduler.timesteps
__snake_case = torch.manual_seed(0 )
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__snake_case = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
__snake_case = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
__snake_case = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
__snake_case = pred_prev_sample
__snake_case = torch.sum(torch.abs(snake_case_ ) )
__snake_case = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def lowerCAmelCase ( self : Dict ):
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**snake_case_ )
__snake_case = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case_ , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case_ )
def lowerCAmelCase ( self : Union[str, Any] ):
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**snake_case_ )
__snake_case = [39, 30, 12, 1, 0]
__snake_case = len(snake_case_ )
with self.assertRaises(snake_case_ , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ )
def lowerCAmelCase ( self : List[Any] ):
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**snake_case_ )
__snake_case = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case_ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=snake_case_ )
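
# Sampling sketch (added illustration of the loop these tests exercise; `denoiser`
# is a hypothetical stand-in for a trained consistency model):
#
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#     scheduler.set_timesteps(num_inference_steps=2)
#     sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         scaled = scheduler.scale_model_input(sample, t)   # 1. scale model input
#         model_output = denoiser(scaled, t)                # 2. predict noise residual
#         sample = scheduler.step(model_output, t, sample).prev_sample  # 3. previous sample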
| 614
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
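
# Usage sketch (added illustration): with the _LazyModule registration above,
# `from transformers.models.groupvit import GroupViTConfig` resolves without
# importing torch or TF; the heavy modeling modules are only imported when one
# of their symbols (e.g. GroupViTModel or TFGroupViTModel) is first accessed.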
| 614
| 1
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Max pooling over a square matrix with a square window.

    >>> maxpooling(np.array([[1, 2], [3, 4]]), size=2, stride=2)
    array([[4.]])
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Average pooling over a square matrix with a square window.

    >>> avgpooling(np.array([[1, 2], [3, 4]]), size=2, stride=2)
    array([[2.]])
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open("path_to_image")  # placeholder path; point this at a real square image
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
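
# Worked example (added illustration): pooling a 4x4 matrix with a 2x2 window and
# stride 2 reduces each non-overlapping block to one value.
#
#     mat = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
#     maxpooling(mat, size=2, stride=2)   # [[ 6.,  8.], [14., 16.]]
#     avgpooling(mat, size=2, stride=2)   # [[ 3.,  5.], [11., 13.]]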
| 549
|
"""simple docstring"""
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)')
    return min_val if option else max_val
def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)
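# Quick sanity check for the helper above (names in this file are reconstructed
# from usage; note the truncating integer average):
# >>> get_avg(10, 20)
# 15
# >>> get_avg(3, 4)
# 3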
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)')
    if not lower < to_guess < higher:
        raise ValueError('guess value must be within the range of lower and higher value')

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('started...')
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    lower = int(input('Enter lower value : ').strip())
    higher = int(input('Enter high value : ').strip())
    guess = int(input('Enter value to guess : ').strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 549
| 1
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
_UpperCamelCase = """examples/"""
_UpperCamelCase = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_UpperCamelCase = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_UpperCamelCase = """README.md"""
def update_version_in_file(fname, version, pattern):
    """simple docstring"""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """simple docstring"""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """simple docstring"""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """simple docstring"""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
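# Example of the bump logic above (hypothetical version numbers): from
# "4.27.0.dev0", pre_release_work proposes "4.27.0"; from a released "4.27.0",
# a patch release proposes "4.27.1" and a normal release "4.28.0"; once
# "4.27.0" is out, post_release_work proposes "4.28.0.dev0" as the next dev version.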
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 702
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    """simple docstring"""
    # Weight norm is applied first so the checkpoint's weight_g/weight_v parametrization
    # matches the HF module before copying; remove_weight_norm() folds it back afterwards.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """simple docstring"""
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
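# Typical invocation (hypothetical paths, assuming the reconstructed names above):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan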
| 74
| 0
|
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
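# Quick sketch of the function above (name reconstructed):
# >>> create_ngram("python", 3)
# ['pyt', 'yth', 'tho', 'hon']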
if __name__ == "__main__":
from doctest import testmod
testmod()
| 50
|
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0
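# Why this works: num1 ^ num2 has its sign bit set exactly when the two sign
# bits differ, so the result is negative iff the operands have opposite signs,
# e.g. different_signs(1, -1) -> True and different_signs(1, 1) -> False.
# (Function and parameter names here are reconstructed.)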
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """simple docstring"""
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        '''simple docstring'''
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
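    # NOTE: the components built above are deliberately tiny (32/64-channel UNet and
    # VAE, 5-layer CLIP text encoder) so the fast tests exercise the full pipeline
    # wiring without downloading real weights.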
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))

        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        '''simple docstring'''
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        '''simple docstring'''
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        '''simple docstring'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        '''simple docstring'''
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        '''simple docstring'''
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        '''simple docstring'''
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        '''simple docstring'''
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        '''simple docstring'''
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
| 286
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_squeezebert""": [
        """SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SqueezeBertConfig""",
        """SqueezeBertOnnxConfig""",
    ],
    """tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_squeezebert"""] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 286
| 1
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        text_classifier = pipeline(
            task="""text-classification""", model="""hf-internal-testing/tiny-random-distilbert""", framework="""pt""")

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

        outputs = text_classifier("""This is great !""", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}])

        outputs = text_classifier(["""This is great !""", """This is bad"""], top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ], )

        outputs = text_classifier("""This is great !""", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

        # Legacy behavior
        outputs = text_classifier("""This is great !""", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])

        outputs = text_classifier("""This is great !""", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]])

        outputs = text_classifier(["""This is great !""", """Something else"""], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ], )

        outputs = text_classifier(["""This is great !""", """Something else"""], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs), [
                {"""label""": """LABEL_0""", """score""": 0.504},
                {"""label""": """LABEL_0""", """score""": 0.504},
            ], )
@require_torch
    def test_accepts_torch_device(self):
        """simple docstring"""
        import torch

        text_classifier = pipeline(
            task="""text-classification""", model="""hf-internal-testing/tiny-random-distilbert""", framework="""pt""", device=torch.device("""cpu"""), )

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])
@require_tf
    def test_small_model_tf(self):
        """simple docstring"""
        text_classifier = pipeline(
            task="""text-classification""", model="""hf-internal-testing/tiny-random-distilbert""", framework="""tf""")

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """LABEL_0""", """score""": 0.504}])
@slow
@require_torch
    def test_pt_bert(self):
        """simple docstring"""
        text_classifier = pipeline("""text-classification""")

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 1.0}])
        outputs = text_classifier("""This is bad !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """NEGATIVE""", """score""": 1.0}])
        outputs = text_classifier("""Birds are a type of animal""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 0.988}])
@slow
@require_tf
    def test_tf_bert(self):
        """simple docstring"""
        text_classifier = pipeline("""text-classification""", framework="""tf""")

        outputs = text_classifier("""This is great !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 1.0}])
        outputs = text_classifier("""This is bad !""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """NEGATIVE""", """score""": 1.0}])
        outputs = text_classifier("""Birds are a type of animal""")
        self.assertEqual(nested_simplify(outputs), [{"""label""": """POSITIVE""", """score""": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self, text_classifier, _):
        """simple docstring"""
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = """HuggingFace is in"""
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{"""label""": ANY(str), """score""": ANY(float)}])
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values())

        valid_inputs = ["""HuggingFace is in """, """Paris is in France"""]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), [{"""label""": ANY(str), """score""": ANY(float)}, {"""label""": ANY(str), """score""": ANY(float)}], )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values())
        self.assertTrue(outputs[1]["""label"""] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs), [[{"""label""": ANY(str), """score""": ANY(float)}] * N, [{"""label""": ANY(str), """score""": ANY(float)}] * N], )

        valid_inputs = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), {"""label""": ANY(str), """score""": ANY(float)}, )
        self.assertTrue(outputs["""label"""] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]])
        self.assertEqual(
            nested_simplify(outputs), [{"""label""": ANY(str), """score""": ANY(float)}], )
        self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values())
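    # NOTE on the pair-handling checks above: dict inputs ({"text": ..., "text_pair": ...})
    # are the supported way to classify a sentence pair; a bare [["text", "pair"]] list is
    # rejected (see https://github.com/huggingface/transformers/issues/17305), and the
    # triple-nested list form is kept only for backward compatibility.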
| 566
|
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    '''simple docstring'''
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''')

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + """Fast""")}

    logger.info(f'''Loading tokenizer classes: {tokenizer_names}''')

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''')

        for checkpoint in checkpoint_names:
            logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''')

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''')

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("""/""")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f'''=> File names {file_names}''')

            for file_name in file_names:
                if not file_name.endswith("""tokenizer.json"""):
                    os.remove(file_name)
                    logger.info(f'''=> removing {file_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
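# Typical invocation (hypothetical values):
#   python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers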
| 566
| 1
|
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    '''simple docstring'''
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(""".""")[0].split(lora_prefix_text_encoder + """_""")[-1].split("""_""")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(""".""")[0].split(lora_prefix_unet + """_""")[-1].split("""_""")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""", """lora_up"""))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("""lora_up""", """lora_down"""))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
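# The update above implements W = W0 + alpha * (up @ down) for every LoRA pair;
# for 4D conv weights the trailing 1x1 spatial dims are squeezed before the
# matmul and restored with unsqueeze afterwards.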
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.7_5, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 265
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : List[str] , lowercase : List[Any]=1_3 , lowercase : Union[str, Any]=7 , lowercase : Dict=True , lowercase : Optional[int]=True , lowercase : List[Any]=True , lowercase : Dict=True , lowercase : List[str]=9_9 , lowercase : Dict=1_6 , lowercase : Dict=3_6 , lowercase : str=6 , lowercase : List[Any]=6 , lowercase : int=6 , lowercase : Union[str, Any]=3_7 , lowercase : Union[str, Any]="gelu" , lowercase : List[Any]=0.1 , lowercase : List[str]=0.1 , lowercase : str=5_1_2 , lowercase : Any=1_6 , lowercase : str=2 , lowercase : List[Any]=0.0_2 , lowercase : Tuple=3 , lowercase : Dict=4 , lowercase : Dict=None , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = embedding_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_hidden_groups
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
def A ( self : List[str] ) -> str:
'''simple docstring'''
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] ) -> str:
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Dict , lowercase : str , lowercase : int , lowercase : List[str] , lowercase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = AlbertModel(config=lowercase )
model.to(lowercase )
model.eval()
UpperCamelCase__ = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
UpperCamelCase__ = model(lowercase , token_type_ids=lowercase )
UpperCamelCase__ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : int , lowercase : List[str] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = AlbertForPreTraining(config=lowercase )
model.to(lowercase )
model.eval()
UpperCamelCase__ = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , sentence_order_label=lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def A ( self : Optional[int] , lowercase : Dict , lowercase : List[Any] , lowercase : Any , lowercase : List[str] , lowercase : int , lowercase : Any , lowercase : Dict ) -> Any:
'''simple docstring'''
UpperCamelCase__ = AlbertForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
UpperCamelCase__ = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Optional[Any] , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : List[Any] , lowercase : List[str] , lowercase : Optional[Any] , lowercase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = AlbertForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
UpperCamelCase__ = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Any , lowercase : Optional[Any] , lowercase : Any , lowercase : Dict , lowercase : Any , lowercase : Optional[int] , lowercase : str , lowercase : Dict ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = AlbertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
UpperCamelCase__ = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : List[str] , lowercase : int , lowercase : Any , lowercase : Tuple , lowercase : List[str] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = AlbertForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
UpperCamelCase__ = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[Any] , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : Any , lowercase : List[str] , lowercase : Optional[Any] , lowercase : List[str] , lowercase : List[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = AlbertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,unittest.TestCase ):
'''simple docstring'''
__a : Any = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : List[Any] = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : List[str] = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["""sentence_order_label"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
def A ( self : Any ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = AlbertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=lowercase , hidden_size=3_7 )
def A ( self : int ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : str ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : int ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase )
def A ( self : Tuple ) -> int:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def A ( self : Any ) -> int:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase )
def A ( self : List[str] ) -> int:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
def A ( self : int ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase )
    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
@slow
def A ( self : Optional[int] ) -> Any:
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = AlbertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = AlbertModel.from_pretrained("""albert-base-v2""" )
UpperCamelCase__ = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(lowercase , attention_mask=lowercase )[0]
UpperCamelCase__ = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , lowercase )
UpperCamelCase__ = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase , atol=1e-4 ) )
| 265
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
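# Sketch of a run (names reconstructed from usage):
# >>> transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
# >>> visited = get_transitions("a", transitions, 5000)
# For this chain the stationary ratio pi_a / pi_b is 5, so visited["a"] should
# come out roughly five times visited["b"].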
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292
|
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]

        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
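# Note on the up blocks above: res_hidden_states_tuple is consumed last-in-first-out,
# mirroring the order in which the down blocks appended their outputs, and each popped
# skip connection is concatenated with hidden_states on the channel axis before the resnet.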
| 292
| 1
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
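A hedged usage sketch for the processor above; the checkpoint name and the query strings are illustrative assumptions:

from PIL import Image
import numpy as np
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))

# A nested list of queries is padded so every sample has the same number of text queries.
inputs = processor(text=[["a cat", "a dog"]], images=image, return_tensors="np")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)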
| 314
|
import re
def dna(dna: str) -> str:
    """Return the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
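A quick check of the complement function (this mirrors what a doctest for it would assert):

print(dna("ATCG"))  # TAGC
print(dna("GTCATCTA"))  # CAGTAGAT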
| 314
| 1
|
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list) -> int:
    """Recursively explore every cell, tracking the best square seen so far."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list) -> int:
    """Same recursion as above, memoized in dp_array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list) -> int:
    """Bottom-up DP keeping only two rows of the table in memory."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
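All four implementations should agree; a small illustrative check on a 3x3 grid:

grid = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
print(largest_square_area_in_matrix_top_down_approach(3, 3, grid))  # 2
print(largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, grid))  # 2
print(largest_square_area_in_matrix_bottom_up(3, 3, grid))  # 2
print(largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, grid))  # 2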
| 167
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
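A hedged sketch of how the script above is typically invoked; paths, the checkpoint name, and hyperparameters are placeholders:

# python run_ner.py \
#     --model_name_or_path bert-base-cased \
#     --data_dir /path/to/conll2003 \
#     --labels /path/to/conll2003/labels.txt \
#     --output_dir /tmp/ner-output \
#     --max_seq_length 128 \
#     --do_train --do_eval --do_predict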
| 167
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
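A short sketch of what the `_LazyModule` registration above buys: submodules are imported only when an attribute is first touched. The import path assumes an installed `transformers`:

from transformers.models import reformer

# Accessing the attribute triggers the real import of `configuration_reformer`.
print(reformer.ReformerConfig.model_type)  # "reformer"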
| 708
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element of the vector into (0, 1)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (a.k.a. swish): x * sigmoid(x), applied elementwise."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
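A quick numeric check of the two activations defined above:

x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))  # [0.26894142 0.5        0.73105858]
print(sigmoid_linear_unit(x))  # [-0.26894142  0.          0.73105858]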
| 592
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 209
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000, encoder_layers=12, encoder_ffn_dim=2_048, encoder_attention_heads=4,
        decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6_000,
        max_target_positions=1_024, num_conv_layers=2, conv_kernel_sizes=(5, 5),
        conv_channels=1_024, input_feat_per_channel=80, input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`.")

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
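A small sketch exercising the kernel-size validation above (values chosen for illustration):

config = Speech2TextConfig()  # defaults: num_conv_layers=2, conv_kernel_sizes=(5, 5)
print(config.conv_kernel_sizes)  # [5, 5]

try:
    Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))
except ValueError as err:
    print("rejected:", err)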
| 7
| 0
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark builder; `features` is optional."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir")
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"])
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"])
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"])
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator: "datasets.SplitGenerator", file_format: str = "arrow",
                       max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
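A hedged sketch of the intended entry point; `Dataset.from_spark` routes through the builder above and needs a live Spark session, so it is shown as a comment:

# from pyspark.sql import SparkSession
# from datasets import Dataset
#
# spark = SparkSession.builder.getOrCreate()
# df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
# ds = Dataset.from_spark(df)  # materializes the DataFrame through the Spark builder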
| 714
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 398
| 0
|
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIPTokenizer that maps one placeholder token to several learned sub-tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer.")

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent.")
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )
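A hedged usage sketch of the multi-vector expansion above; the checkpoint name is an assumption:

tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=3)
print(tokenizer.token_map["<cat-toy>"])
# ['<cat-toy>_0', '<cat-toy>_1', '<cat-toy>_2']
print(tokenizer.replace_placeholder_tokens_in_text("a photo of <cat-toy>"))
# a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2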
| 34
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
    def __init__(
        self,
        feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10,
        fft_window_size=1024, padding_value=0.0, return_attention_mask=False,
        frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None,
        truncation: str = "fusion", padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform, mel_filters=None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
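A hedged usage sketch: one second of silence through the extractor above, built with its default 48 kHz setup:

audio = np.zeros(48_000)  # 1 s at the default sampling rate
extractor = ClapFeatureExtractor()
features = extractor(audio, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape)  # (1, 4, 1001, 64): four fused mel views per clip
print(features["is_longer"])  # [[True]]: with no long clip in the batch, one is promoted at random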
| 273
| 0
|
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 714
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( A ):
"""simple docstring"""
def __init__( self : Tuple , A_ : Optional[Any] , A_ : Dict=1_3 , A_ : str=7 , A_ : Union[str, Any]=True , A_ : int=True , A_ : Any=False , A_ : str=True , A_ : int=9_9 , A_ : int=3_2 , A_ : Optional[int]=5 , A_ : List[str]=4 , A_ : int=6_4 , A_ : Optional[int]="gelu" , A_ : List[Any]=0.1 , A_ : int=0.1 , A_ : List[str]=5_1_2 , A_ : Optional[Any]=1_6 , A_ : int=2 , A_ : Optional[int]=0.02 , A_ : Any=3 , A_ : Optional[Any]=4 , A_ : Union[str, Any]=None , A_ : Union[str, Any]=2 , A_ : Tuple=2 , A_ : Optional[int]=2 , A_ : List[Any]=2 , A_ : List[str]=4 , A_ : Union[str, Any]=1 , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : Union[str, Any] = use_input_mask
_lowerCAmelCase : Union[str, Any] = use_token_type_ids
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : List[Any] = type_vocab_size
_lowerCAmelCase : int = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Tuple = num_labels
_lowerCAmelCase : Dict = num_choices
_lowerCAmelCase : Optional[Any] = scope
_lowerCAmelCase : Union[str, Any] = q_groups
_lowerCAmelCase : Tuple = k_groups
_lowerCAmelCase : str = v_groups
_lowerCAmelCase : Tuple = post_attention_groups
_lowerCAmelCase : Tuple = intermediate_groups
_lowerCAmelCase : List[Any] = output_groups
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : int = None
if self.use_input_mask:
_lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Any ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __magic_name__ ( self : List[str] , A_ : Dict , A_ : Union[str, Any] , A_ : List[str] , A_ : Optional[int] , A_ : str , A_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertModel(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[Any] = model(A_ , A_ )
_lowerCAmelCase : Tuple = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Optional[int] , A_ : int , A_ : Dict , A_ : Any , A_ : List[Any] , A_ : List[Any] , A_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : int = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Optional[int] , A_ : Union[str, Any] , A_ : List[Any] , A_ : List[Any] , A_ : List[Any] , A_ : List[str] , A_ : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[Any] = model(
A_ , attention_mask=A_ , start_positions=A_ , end_positions=A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Tuple , A_ : Optional[int] , A_ : Dict , A_ : str , A_ : Tuple , A_ : List[Any] , A_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = SqueezeBertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[Any] = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Tuple , A_ : List[Any] , A_ : List[Any] , A_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : Any = SqueezeBertForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : Any = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Tuple , A_ : Tuple , A_ : Tuple , A_ : Union[str, Any] , A_ : int , A_ : List[Any] , A_ : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_choices
_lowerCAmelCase : Dict = SqueezeBertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = model(
A_ , attention_mask=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( A , A , unittest.TestCase ):
"""simple docstring"""
_lowercase : Tuple = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
        else ()
)
_lowercase : Optional[Any] = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : int = True
_lowercase : List[str] = False
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : int = SqueezeBertModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self , config_class=A_ , dim=3_7 )
def __magic_name__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*A_ )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*A_ )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*A_ )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*A_ )
@slow
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = SqueezeBertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
_lowerCAmelCase : Optional[int] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
_lowerCAmelCase : List[str] = model(A_ )[0]
_lowerCAmelCase : Any = torch.Size((1, 3) )
self.assertEqual(output.shape , A_ )
_lowerCAmelCase : Any = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(A_ , A_ , atol=1E-4 ) )
| 503
| 0
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Count the distinct terms of a**b for 2 <= a <= n and 2 <= b <= n (Project Euler 29)."""
    collect_powers = set()
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
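# Hand-checked example (an added illustration, not part of the original solution):
# for n = 5 the double loop yields 16 products a**b, and 2**4 == 4**2 is the only
# collision, leaving 15 distinct terms.
if __name__ == "__main__":
    assert solution(5) == 15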
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 163
|
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    """Keep only the first occurrence of each alphabetic character (or space) in the key."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a substitution alphabet that starts with the deduplicated keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
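# Round-trip demo (an added illustration; key and message mirror the upstream
# doctests for this cipher):
if __name__ == "__main__":
    demo_map = create_cipher_map("Goodbye!!")
    assert decipher(encipher("Hello World!!", demo_map), demo_map) == "HELLO WORLD!!"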
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 163
| 1
|
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``; record even-sized cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)
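# Added note: for the sample tree wired up below, two edges can be removed so that
# every remaining component has an even number of nodes, so the script prints 2
# (len(cuts) - 1 discounts the pseudo-cut recorded at the root).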
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 545
|
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")
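# Round-trip sanity check (an added illustration):
if __name__ == "__main__":
    assert base85_decode(base85_encode("Hello World!")) == "Hello World!"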
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 545
| 1
|
"""simple docstring"""
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
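# Added note: with the word tuple above, autocomplete_using_trie("de") returns
# ('depart ', 'detergent ', 'deer ', 'deal ') -- each completion ends with the
# space that _elements uses as its end-of-word marker.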
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 153
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ ( snake_case__ ):
def __init__( self : str , snake_case__ : Union[str, Any] , snake_case__ : Any=13 , snake_case__ : Union[str, Any]=7 , snake_case__ : Optional[int]=True , snake_case__ : Optional[int]=True , snake_case__ : str=True , snake_case__ : int=True , snake_case__ : Union[str, Any]=99 , snake_case__ : Tuple=32 , snake_case__ : Optional[Any]=5 , snake_case__ : Dict=4 , snake_case__ : Any=37 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Optional[Any]=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : Optional[int]=512 , snake_case__ : List[str]=16 , snake_case__ : int=2 , snake_case__ : Any=0.02 , snake_case__ : Union[str, Any]=False , snake_case__ : List[Any]=True , snake_case__ : int="None" , snake_case__ : Any=3 , snake_case__ : Any=4 , snake_case__ : int=None , ):
lowerCamelCase_ : int =parent
lowerCamelCase_ : Union[str, Any] =batch_size
lowerCamelCase_ : Optional[Any] =seq_length
lowerCamelCase_ : Tuple =is_training
lowerCamelCase_ : Tuple =use_input_mask
lowerCamelCase_ : str =use_token_type_ids
lowerCamelCase_ : Optional[int] =use_labels
lowerCamelCase_ : Optional[int] =vocab_size
lowerCamelCase_ : Union[str, Any] =hidden_size
lowerCamelCase_ : Union[str, Any] =num_hidden_layers
lowerCamelCase_ : str =num_attention_heads
lowerCamelCase_ : Dict =intermediate_size
lowerCamelCase_ : Dict =hidden_act
lowerCamelCase_ : Optional[Any] =hidden_dropout_prob
lowerCamelCase_ : str =attention_probs_dropout_prob
lowerCamelCase_ : str =max_position_embeddings
lowerCamelCase_ : Tuple =type_vocab_size
lowerCamelCase_ : Optional[int] =type_sequence_label_size
lowerCamelCase_ : Tuple =initializer_range
lowerCamelCase_ : Union[str, Any] =num_labels
lowerCamelCase_ : Tuple =num_choices
lowerCamelCase_ : Optional[Any] =relative_attention
lowerCamelCase_ : List[Any] =position_biased_input
lowerCamelCase_ : Tuple =pos_att_type
lowerCamelCase_ : Optional[int] =scope
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : List[Any] =None
if self.use_input_mask:
lowerCamelCase_ : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCamelCase_ : List[Any] =None
if self.use_token_type_ids:
lowerCamelCase_ : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ : List[Any] =None
lowerCamelCase_ : List[str] =None
lowerCamelCase_ : List[Any] =None
if self.use_labels:
lowerCamelCase_ : List[str] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : int =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ : int =ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ : Optional[Any] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Optional[int] ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase__ ( self : Any , snake_case__ : Dict ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase__ ( self : Any , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] ):
lowerCamelCase_ : int =DebertaVaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : int =model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )[0]
lowerCamelCase_ : Any =model(snake_case__ , token_type_ids=snake_case__ )[0]
lowerCamelCase_ : List[Any] =model(snake_case__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Dict ):
lowerCamelCase_ : int =DebertaVaForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : int =model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Any , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Dict ):
lowerCamelCase_ : str =self.num_labels
lowerCamelCase_ : Union[str, Any] =DebertaVaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : int =model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case__ )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[str] ):
lowerCamelCase_ : str =self.num_labels
lowerCamelCase_ : List[str] =DebertaVaForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : Optional[int] =model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Any , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : List[Any] ):
lowerCamelCase_ : int =DebertaVaForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : int =model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Any ):
lowerCamelCase_ : Tuple =DebertaVaForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : Tuple =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Any =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Union[str, Any] =model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : Union[str, Any] =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCamelCase_ : List[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowercase__ ( snake_case__, snake_case__, unittest.TestCase ):
_UpperCAmelCase :Optional[Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :int = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Optional[Any] = True
_UpperCAmelCase :Any = False
_UpperCAmelCase :Dict = False
_UpperCAmelCase :Union[str, Any] = False
_UpperCAmelCase :Tuple = False
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : Tuple =DebertaVaModelTester(self )
lowerCamelCase_ : List[str] =ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case__ )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case__ )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case__ )
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case__ )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case__ )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : List[str] =DebertaVaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def UpperCAmelCase__ ( self : Union[str, Any] ):
pass
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : Union[str, Any] =DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
lowerCamelCase_ : Dict =torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
lowerCamelCase_ : Union[str, Any] =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase_ : List[Any] =model(snake_case__ , attention_mask=snake_case__ )[0]
# compare the actual values for a slice.
lowerCamelCase_ : List[Any] =torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1E-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 153
| 1
|
A = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A = [{"type": "code", "content": INSTALL_CONTENT}]
A = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 711
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
A = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__= BartphoTokenizer
A__= False
A__= True
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
super().setUp()
UpperCAmelCase__ = ["▁This", "▁is", "▁a", "▁t", "est"]
UpperCAmelCase__ = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
UpperCAmelCase__ = {"unk_token": "<unk>"}
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
UpperCAmelCase__ = BartphoTokenizer(_lowercase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self : Dict , **_lowercase : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def _UpperCAmelCase ( self : List[Any] , _lowercase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = "This is a là test"
UpperCAmelCase__ = "This is a<unk><unk> test"
return input_text, output_text
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = BartphoTokenizer(_lowercase , self.monolingual_vocab_file , **self.special_tokens_map )
UpperCAmelCase__ = "This is a là test"
UpperCAmelCase__ = "▁This ▁is ▁a ▁l à ▁t est".split()
UpperCAmelCase__ = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
UpperCAmelCase__ = tokens + [tokenizer.unk_token]
UpperCAmelCase__ = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
| 277
| 0
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 377
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
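# Added illustration: in the row [4, 3, 2, -1] the first negative value sits at
# index 3, so find_negative_index([4, 3, 2, -1]) == 3 -- the row holds exactly
# three non-negative numbers.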
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
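# Added note: because rows and columns are sorted in decreasing order, the first
# negative index can only move left (or stay) from one row to the next, so each
# row is searched only up to the previous row's bound -- a "staircase" walk.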
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 377
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False ) -> Optional[Any]:
lowerCamelCase_ = 'backbone.' if is_semantic else ''
lowerCamelCase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : int=False , _lowerCamelCase : str=False ) -> int:
for i in range(config.num_hidden_layers ):
lowerCamelCase_ = 'backbone.' if is_semantic else ''
# queries, keys and values
lowerCamelCase_ = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
lowerCamelCase_ = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
lowerCamelCase_ = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
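        # Added note: the fused qkv weight is split row-wise into thirds below --
        # query rows first, then key, then value. Only q_bias and v_bias are stored
        # because BEiT keeps the key bias fixed at zero.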
lowerCamelCase_ = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase_ = q_bias
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase_ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowerCamelCase_ = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
lowerCamelCase_ = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
lowerCamelCase_ = gamma_a
lowerCamelCase_ = gamma_a
def lowerCamelCase__ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] ) -> Dict:
lowerCamelCase_ = dct.pop(_lowerCamelCase )
lowerCamelCase_ = val
def lowerCamelCase__ ( ) -> Optional[Any]:
lowerCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase_ = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int]=False ) -> Any:
lowerCamelCase_ = False if 'rvlcdip' in checkpoint_url else True
lowerCamelCase_ = BeitConfig(use_absolute_position_embeddings=_lowerCamelCase , use_mask_token=_lowerCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowerCamelCase_ = 1024
lowerCamelCase_ = 4096
lowerCamelCase_ = 24
lowerCamelCase_ = 16
# labels
if "rvlcdip" in checkpoint_url:
lowerCamelCase_ = 16
lowerCamelCase_ = 'huggingface/label-files'
lowerCamelCase_ = 'rvlcdip-id2label.json'
lowerCamelCase_ = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase_ = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowerCamelCase_ = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location='cpu' )['model']
lowerCamelCase_ = create_rename_keys(_lowerCamelCase , has_lm_head=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , has_lm_head=_lowerCamelCase )
# load HuggingFace model
lowerCamelCase_ = BeitForMaskedImageModeling(_lowerCamelCase ) if has_lm_head else BeitForImageClassification(_lowerCamelCase )
model.eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image
lowerCamelCase_ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCamelCase )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=_lowerCamelCase , return_tensors='pt' )
lowerCamelCase_ = encoding['pixel_values']
lowerCamelCase_ = model(_lowerCamelCase )
lowerCamelCase_ = outputs.logits
# verify logits
lowerCamelCase_ = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_lowerCamelCase ), "Shape of logits not as expected"
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
if has_lm_head:
lowerCamelCase_ = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
lowerCamelCase_ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_lowerCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
_SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 137
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 137
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 213
|
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"'{i}' not in list of symbols")
        elif i in tmppbl:
            raise Exception(f"Duplicate symbol ({i})")
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
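# Added illustration: _plugboard("POLAND") returns the symmetric mapping
# {'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'}.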
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 213
| 1
|
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        # switches the pivot with the left most bound
        a[pivot], a[left] = a[left], a[pivot]
        pivot_index = partition(a, left, right)
        # recursive quicksort to the left of the pivot point
        quick_sort_random(a, left, pivot_index)
        # recursive quicksort to the right of the pivot point
        quick_sort_random(a, pivot_index + 1, right)
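# Minimal non-interactive example (an added illustration):
if __name__ == "__main__":
    sample = [3, 1, 4, 1, 5, 9, 2, 6]
    quick_sort_random(sample, 0, len(sample))
    assert sample == [1, 1, 2, 3, 4, 5, 6, 9]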
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
| 710
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class SCREAMING_SNAKE_CASE_ ( _lowercase):
'''simple docstring'''
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = path_or_paths
snake_case__ : Any = split if split or isinstance(lowerCamelCase__ , lowerCamelCase__) else "train"
snake_case__ : Union[str, Any] = features
snake_case__ : str = cache_dir
snake_case__ : Dict = keep_in_memory
snake_case__ : Dict = streaming
snake_case__ : List[str] = num_proc
snake_case__ : Any = kwargs
@abstractmethod
def UpperCAmelCase ( self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
'''simple docstring'''
pass
class SCREAMING_SNAKE_CASE_ ( _lowercase):
'''simple docstring'''
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = features
snake_case__ : Any = cache_dir
snake_case__ : Any = keep_in_memory
snake_case__ : str = streaming
snake_case__ : List[str] = num_proc
snake_case__ : List[str] = kwargs
@abstractmethod
def UpperCAmelCase ( self) -> Union[Dataset, IterableDataset]:
'''simple docstring'''
pass
| 150
| 0
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Dict, _snake_case : Union[str, Any], _snake_case : Dict=7, _snake_case : Optional[Any]=3, _snake_case : int=1_8, _snake_case : Union[str, Any]=3_0, _snake_case : Dict=4_0_0, _snake_case : Optional[Any]=True, _snake_case : str=None, _snake_case : Dict=True, ) ->List[str]:
snake_case__ : Any = size if size is not None else {'height': 1_8, 'width': 1_8}
snake_case__ : List[Any] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Tuple = num_channels
snake_case__ : Tuple = image_size
snake_case__ : str = min_resolution
snake_case__ : Any = max_resolution
snake_case__ : Any = do_resize
snake_case__ : Dict = size
snake_case__ : List[Any] = do_normalize
def lowercase_ ( self : Any ) ->int:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ImageGPTImageProcessor if is_vision_available() else None
def lowercase_ ( self : Tuple ) ->Any:
snake_case__ : Union[str, Any] = ImageGPTImageProcessingTester(self )
@property
def lowercase_ ( self : Union[str, Any] ) ->int:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Optional[Any] ) ->Union[str, Any]:
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case, 'clusters' ) )
self.assertTrue(hasattr(_snake_case, 'do_resize' ) )
self.assertTrue(hasattr(_snake_case, 'size' ) )
self.assertTrue(hasattr(_snake_case, 'do_normalize' ) )
def lowercase_ ( self : Optional[int] ) ->Optional[Any]:
snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'height': 1_8, 'width': 1_8} )
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2 )
self.assertEqual(image_processor.size, {'height': 4_2, 'width': 4_2} )
def lowercase_ ( self : str ) ->List[str]:
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
snake_case__ : Union[str, Any] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case, obj[key] ) )
else:
self.assertEqual(obj[key], _snake_case )
def lowercase_ ( self : List[str] ) ->int:
snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : str = os.path.join(_snake_case, 'image_processor.json' )
image_processor_first.to_json_file(_snake_case )
snake_case__ : List[Any] = self.image_processing_class.from_json_file(_snake_case ).to_dict()
snake_case__ : Union[str, Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case, image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key], _snake_case )
def lowercase_ ( self : List[Any] ) ->Union[str, Any]:
snake_case__ : str = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_snake_case )
snake_case__ : Optional[int] = self.image_processing_class.from_pretrained(_snake_case ).to_dict()
snake_case__ : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case, image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key], _snake_case )
@unittest.skip('ImageGPT requires clusters at initialization' )
def lowercase_ ( self : Dict ) ->Tuple:
pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self : Optional[int] ) ->int:
snake_case__ : Tuple = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
snake_case__ : int = prepare_images()
# test non-batched
snake_case__ : int = image_processing(images[0], return_tensors='pt' )
self.assertIsInstance(encoding.input_ids, torch.LongTensor )
self.assertEqual(encoding.input_ids.shape, (1, 1_0_2_4) )
snake_case__ : str = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist(), _snake_case )
# test batched
snake_case__ : List[str] = image_processing(_snake_case, return_tensors='pt' )
self.assertIsInstance(encoding.input_ids, torch.LongTensor )
self.assertEqual(encoding.input_ids.shape, (2, 1_0_2_4) )
snake_case__ : Optional[int] = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist(), _snake_case )
| 478
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
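# Added note: each element lives in its own process and the worker performs ten
# compare-exchange rounds, matching the ten-element demo list below -- odd-even
# transposition sort needs at most n rounds to sort n values.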
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 478
| 1
|
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
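# A standalone sketch of the same integration check outside unittest
# (illustrative addition; requires torch and network access to download
# "microsoft/mpnet-base"):
if __name__ == "__main__":
    model = MPNetModel.from_pretrained("microsoft/mpnet-base")
    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    with torch.no_grad():
        last_hidden_state = model(input_ids)[0]
    print(last_hidden_state.shape)  # torch.Size([1, 11, 768])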
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
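# A sketch of how this scheduler is driven during sampling (predictor-corrector
# loop), mirroring diffusers' ScoreSdeVePipeline; `unet` stands for any score
# model with a (sample, sigma) -> .sample interface and is an assumption here:
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   sample = torch.randn(*shape) * scheduler.init_noise_sigma
#   for i, t in enumerate(scheduler.timesteps):
#       sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])
#       for _ in range(scheduler.config.correct_steps):
#           model_output = unet(sample, sigma_t).sample
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = unet(sample, sigma_t).sample
#       output = scheduler.step_pred(model_output, t, sample)
#       sample, sample_mean = output.prev_sample, output.prev_sample_mean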
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
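# A minimal usage sketch for the processor exercised above (illustrative
# addition; the 65x97 input is an arbitrary stand-in image):
if __name__ == "__main__":
    image = Image.fromarray(np.zeros((65, 97, 3), dtype=np.uint8))
    processor = GLPNImageProcessor(size_divisor=32)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # height and width are rounded down to multiples of size_divisor
    print(pixel_values.shape)  # torch.Size([1, 3, 64, 96])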
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
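# An end-to-end usage sketch for the pipeline tested above (illustrative
# addition; downloads "harmonai/maestro-150k" and benefits from a GPU):
if __name__ == "__main__":
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to(torch_device)
    audios = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096).audios
    print(audios.shape)  # (batch, channels, samples)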
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """
    >>> encrypt_message(6, 'Harshil Darji')
    'Hlia rDsahrij'
    """
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """
    >>> decrypt_message(6, 'Hlia rDsahrij')
    'Harshil Darji'
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
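# A worked example of the columnar scheme above: with key=6, every 6th
# character of "Harshil Darji" lands in the same column:
#
#   col 0 -> "Hli", col 1 -> "a ", col 2 -> "rD",
#   col 3 -> "sa",  col 4 -> "hr", col 5 -> "ij"
#
# so encrypt_message(6, "Harshil Darji") == "Hlia rDsahrij", and
# decrypt_message(6, "Hlia rDsahrij") restores the original string.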
'''simple docstring'''
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
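# A few quick examples of the stack-based check above (illustrative addition):
#
#   >>> is_balanced("([]{})")
#   True
#   >>> is_balanced("[(])")   # ']' arrives while '(' is on top -> mismatch
#   False
#   >>> is_balanced("")
#   True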
def ugly_numbers(n: int) -> int:
    """
    Returns the nth ugly number; ugly numbers have only 2, 3 and 5 as prime
    factors.

    >>> ugly_numbers(100)
    1536
    >>> ugly_numbers(0)
    1
    >>> ugly_numbers(20)
    36
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
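# The three-pointer merge above yields, in order:
#   1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, ...
# (7 and 11 are skipped since they introduce prime factors other than 2, 3, 5),
# e.g. [ugly_numbers(i) for i in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]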
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k: str, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    config = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(config)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
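# A typical invocation (illustrative; paths are placeholders and the script
# filename follows the transformers naming convention, which is an assumption):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_checkpoint \
#       --save_dir /path/to/pytorch_output_dir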
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """Unconditional image generation with a VQ-VAE and a UNet latent diffusion model."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
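# A minimal usage sketch (illustrative addition; "CompVis/ldm-celebahq-256" is
# a public unconditional LDM checkpoint compatible with this pipeline):
#
#   from diffusers import LDMPipeline
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]
#   image.save("ldm_sample.png")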
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """Factory used to instantiate the serving server from command line arguments."""
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config,
        tokenizer=args.tokenizer, device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    infos: dict


class ServeTokenizeResult(BaseModel):
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    text: str


class ServeForwardResult(BaseModel):
    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute("/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"]),
                    APIRoute("/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"]),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
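# Typical usage of the command defined above (illustrative addition; the JSON
# field names follow the Body(...) parameters of the tokenize endpoint):
#
#   transformers-cli serve --task sentiment-analysis --port 8888
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'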
import string
def atbash_slow(sequence: str) -> str:
    """
    >>> atbash_slow("ABCDEFG")
    'ZYXWVUT'
    """
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """
    >>> atbash("ABCDEFG")
    'ZYXWVUT'
    """
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark both implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
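# Atbash is an involution: applying it twice restores the input. For example:
#
#   >>> atbash("ABC")
#   'ZYX'
#   >>> atbash(atbash("testStringtest"))
#   'testStringtest'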
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually took a training step.
    Useful to avoid stepping the scheduler too fast when gradients overflowed and there was no training step (in
    mixed precision training).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
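# A sketch of how this wrapper is normally obtained; in practice accelerate
# wraps a scheduler for you via Accelerator.prepare (illustrative addition):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
#   ...
#   scheduler.step()  # only advances when the optimizer actually stepped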
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
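# A minimal sketch of the reader/writer pair exercised by these tests
# (illustrative addition; file names are placeholders):
#
#   from datasets import Dataset
#   from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#   reloaded = JsonDatasetReader("out.jsonl").read()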
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: Optional[int] = 1,
        **kwargs,
    ):
        # transcribe the audio prompt with Whisper
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase : List[Any] = uncond_embeddings.shape[1]
lowerCAmelCase : List[str] = uncond_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase : str = torch.randn(snake_case__ , generator=snake_case__ , device='cpu' , dtype=snake_case__ ).to(
self.device )
else:
lowerCAmelCase : Tuple = torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCAmelCase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase : Tuple = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase : Union[str, Any] = {}
if accepts_eta:
lowerCAmelCase : int = eta
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase : Tuple = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
lowerCAmelCase : List[str] = self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase , lowerCAmelCase : Dict = noise_pred.chunk(2 )
lowerCAmelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase : int = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = 1 / 0.1_8_2_1_5 * latents
lowerCAmelCase : Dict = self.vae.decode(snake_case__ ).sample
lowerCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase : Dict = self.numpy_to_pil(snake_case__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
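# Minimal usage sketch (not part of the original file). It follows the
# documented way of loading diffusers community pipelines; the Whisper model id
# and the dummy audio dataset are illustrative choices, not taken from the code
# above.
if __name__ == "__main__":
    import datasets  # assumption: used only to fetch a small example audio clip

    ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    audio_sample = ds[0]["audio"]

    pipeline = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="speech_to_image_diffusion",  # assumption: community pipeline name
        speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
        speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
    )
    image = pipeline(audio_sample["array"], sampling_rate=audio_sample["sampling_rate"]).images[0]
    image.save("speech_to_image.png")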
| 646
| 0
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
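# Worked example (illustrative, not in the original script): with
# sample_rate=16_000 and max_length=20.0, sample_length is 320_000 frames, so a
# 10-second clip (160_000 frames) is returned unchanged, while a 60-second clip
# (960_000 frames) yields a random 320_000-frame window.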
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main() -> None:
    # Parse command-line arguments (or a single JSON config file) into dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
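# Example invocation (illustrative; the flags mirror the dataclass fields
# above, while the dataset id follows the keyword-spotting example and is an
# assumption):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb \
#       --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval \
#       --learning_rate 3e-5 \
#       --max_length_seconds 1 \
#       --per_device_train_batch_size 32 \
#       --num_train_epochs 5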
| 653
|
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message by subtracting the key letter from each letter."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the cipher by adding the key letter back to each letter."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
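# Worked example (illustrative): generate_key("THE GERMAN ATTACK", "SECRET")
# repeats the key to "SECRETSECRETSECRE". For the first letter, 'T' (19) minus
# 'S' (18) modulo 26 gives 1, so the ciphertext starts with 'B'; decryption
# adds the key letter back: (1 + 18 + 26) % 26 = 19 -> 'T'. Spaces pass
# through unchanged and do not advance the key index.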
| 653
| 1
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
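# Minimal usage sketch (not part of the original __init__ module): loading the
# pipeline re-exported above. The model id is the public microsoft checkpoint;
# treat the prompt and dtype as illustrative.
#
#   import torch
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq", torch_dtype=torch.float16)
#   image = pipe("teddy bear playing in the pool").images[0]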
| 111
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
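# Beyond the assertions above, the 20 rendered frames can be combined into an
# animated GIF. A minimal sketch (not part of the original tests; the output
# path is illustrative):
#
#   from PIL import Image
#   from diffusers.utils import export_to_gif
#
#   frames = [Image.fromarray((frame * 255).astype("uint8")) for frame in images]
#   export_to_gif(frames, "corgi_3d.gif")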
| 111
| 1
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ) -> None:
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the processor is expected to return."""
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
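# Worked example for `get_expected_values` (illustrative): with
# shortest_edge=288 and size_divisor=32, a 480x640 (h x w) image gives
# scale = 288 / 480 = 0.6, so (newh, neww) = (288, 384). The cap
# max_size = int(1333 / 800 * 288) = 479 is not exceeded, and both sides are
# already multiples of 32, so the expected output size is (288, 384).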
| 258
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url: str) -> Swin2SRConfig:
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name: str, config: Swin2SRConfig) -> str:
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
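# Example mapping (illustrative): "layers.0.residual_group.blocks.1.attn.proj.weight"
# is rewritten step by step to
# "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight".
# Fused qkv tensors are not renamed here; they are split into separate
# query/key/value entries in `convert_state_dict` below.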
def convert_state_dict(orig_state_dict: dict, config: Swin2SRConfig) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url: str, pytorch_dump_folder_path: str, push_to_hub: bool) -> None:
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
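# Example invocation (illustrative; the script filename is an assumption based
# on the converter's location in the transformers repository):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64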
| 258
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
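# Minimal usage sketch (not part of the original file): instantiating the
# config and a matching model. `VisualBertModel` lives in the same package;
# the override value is illustrative.
#
#   from transformers import VisualBertConfig, VisualBertModel
#
#   configuration = VisualBertConfig(visual_embedding_dim=512)
#   model = VisualBertModel(configuration)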
| 719
|
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
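# Example CLI usage (illustrative): the subcommand registered above is reached
# through the transformers CLI entry point, e.g.
#
#   transformers-cli download --cache-dir ./models bert-base-uncased
#
# which pre-downloads both the model weights and the tokenizer into the cache.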
| 22
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """CLIP-style image processor: convert to RGB, resize, center-crop, rescale, normalize.

    Reconstructed from a garbled source. The helpers referenced below
    (get_size_dict, get_resize_output_image_size, resize, center_crop, rescale,
    normalize, convert_to_rgb, to_numpy_array, make_list_of_images, valid_images,
    to_channel_dimension_format, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD,
    PILImageResampling, ChannelDimension, BatchFeature) are assumed to be
    imported earlier in the module, as in the transformers image-processing files.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        # Resize so that the shortest edge of the image matches size["shortest_edge"].
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB.
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
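# Usage sketch (an addition, not part of the original module). The class above
# mirrors the CLIP-style image processor in `transformers`; assuming it is
# exposed as reconstructed here, a single PIL image is resized so its shortest
# edge is 224, center-cropped to 224x224, rescaled, normalized, and returned
# channels-first:
#
#     from PIL import Image
#     import numpy as np
#
#     processor = CLIPImageProcessor()
#     image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
#     batch = processor.preprocess(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)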
| 152
|
from manim import *
class CheckpointOffloadScene(Scene):
    # Reconstructed from a garbled source: the class/variable names, the base
    # class, and the arrange() directions are inferred; the geometry, text, and
    # animation structure are kept exactly as they appeared.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        # Small yellow fills marking where the (empty) model's weights sit on the CPU.
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)
        self.add(*model_cpu_arr)

        ckpt_base = [mem.copy() for i in range(6)]
        ckpt_rect = VGroup(*ckpt_base).arrange(RIGHT, buff=0)
        ckpt_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(ckpt_rect, ckpt_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        # Blue fills for the checkpoint weights, mirrored onto the CPU columns.
        ckpt_arr = []
        ckpt_cpu_arr = []
        for i, rect in enumerate(ckpt_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)
            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key, key_text)

        blue_text = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=18
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        # Move the CPU copies of the checkpoint weights down onto the disk.
        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)
        self.play(FadeOut(step_1))

        step_2 = MarkupText("Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))
        self.play(FadeOut(step_2, checkpoint, *ckpt_arr, *ckpt_cpu_arr))
        self.wait()
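# Rendering sketch (an addition, not part of the original scene file). Manim
# scenes are normally rendered from the CLI, e.g. `manim -pql this_file.py
# CheckpointOffloadScene` (the filename is an assumption), but `tempconfig`
# from manim's public API also allows rendering programmatically:
if __name__ == "__main__":
    with tempconfig({"quality": "low_quality", "preview": False}):
        CheckpointOffloadScene().render()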
| 619
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    # Configuration for a ConvNeXt V2 model. Parameter and attribute names are
    # restored from the garbled source; the defaults match the values that were
    # already present.
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
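# Usage sketch (an addition, not part of the original module). Because this file
# uses relative imports, the example below would live in user code, e.g. after
# `from transformers import ConvNextV2Config`; the sizes are illustrative only:
#
#     config = ConvNextV2Config(depths=[2, 2, 6, 2], hidden_sizes=[40, 80, 160, 320], out_features=["stage4"])
#     print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#     print(config.out_features)  # ['stage4'], validated/aligned by BackboneConfigMixin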
| 658
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
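# Usage sketch (an addition, not part of the original script). Metric scripts
# like this one are loaded through the now-deprecated `datasets.load_metric`
# API rather than imported directly (the relative `.nmt_bleu` import above
# would fail on a plain `python` run); the values below are illustrative:
#
#     bleu = datasets.load_metric("bleu")
#     predictions = [["the", "cat", "sat"]]
#     references = [[["the", "cat", "sat"], ["a", "cat", "sat"]]]
#     results = bleu.compute(predictions=predictions, references=references, max_order=2, smooth=True)
#     print(results["bleu"])  # 1.0 for an exact match against one of the references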
| 658
| 1
|