| code (string) | code_codestyle (int64) | style_context (string) | style_context_codestyle (int64) | label (int64) |
|---|---|---|---|---|
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        # a request without a timeout would hang forever in this mode
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
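# A hedged extra sketch (not part of the original file): the three simulation
# modes above can also be exercised in one parametrized test. The exact
# exception raised by http_head depends on the mode, so a broad `Exception`
# is used here rather than asserting a specific type.
@pytest.mark.parametrize(
    "mode",
    [
        OfflineSimulationMode.CONNECTION_TIMES_OUT,
        OfflineSimulationMode.CONNECTION_FAILS,
        OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1,
    ],
)
def test_offline_blocks_http_head(mode):
    with offline(mode):
        with pytest.raises(Exception):
            http_head("https://huggingface.co")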
| 205 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A = logging.get_logger(__name__)
class a__ ( __magic_name__ ):
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
__UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_)))
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens")
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token
super().__init__(
eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : List[str] = extra_ids
__UpperCAmelCase : int = 2**8 # utf is 8 bits
# define special tokens dict
__UpperCAmelCase : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__UpperCAmelCase : Any = len(self.special_tokens_encoder)
__UpperCAmelCase : List[Any] = len(UpperCamelCase_)
for i, token in enumerate(UpperCamelCase_):
__UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n
__UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def a_ ( self : List[Any]):
"""simple docstring"""
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(UpperCamelCase_)) + [1]
return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1]
def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int]):
"""simple docstring"""
if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
"""simple docstring"""
__UpperCAmelCase : Dict = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_)
if token_ids_a is None:
return token_ids_a
else:
__UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_)
return token_ids_a + token_ids_a
def a_ ( self : List[str] , UpperCamelCase_ : str):
"""simple docstring"""
__UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")]
return tokens
def a_ ( self : Tuple , UpperCamelCase_ : List[Any]):
"""simple docstring"""
if token in self.special_tokens_encoder:
__UpperCAmelCase : Any = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__UpperCAmelCase : int = self.added_tokens_encoder[token]
elif len(UpperCamelCase_) != 1:
__UpperCAmelCase : Optional[Any] = self.unk_token_id
else:
__UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens
return token_id
def a_ ( self : Any , UpperCamelCase_ : List[str]):
"""simple docstring"""
if index in self.special_tokens_decoder:
__UpperCAmelCase : Any = self.special_tokens_decoder[index]
else:
__UpperCAmelCase : List[str] = chr(index - self._num_special_tokens)
return token
def a_ ( self : Dict , UpperCamelCase_ : int):
"""simple docstring"""
__UpperCAmelCase : str = b""
for token in tokens:
if token in self.special_tokens_decoder:
__UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8")
elif token in self.added_tokens_decoder:
__UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8")
elif token in self.special_tokens_encoder:
__UpperCAmelCase : Optional[int] = token.encode("utf-8")
elif token in self.added_tokens_encoder:
__UpperCAmelCase : Optional[Any] = token.encode("utf-8")
else:
__UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)])
bstring += tok_string
__UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore")
return string
def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
return ()
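# Usage sketch (not part of the original module), assuming the class above is
# the one shipped as transformers.ByT5Tokenizer. No vocab file is needed:
# ids 0-2 are reserved for pad/eos/unk, so every UTF-8 byte maps to byte + 3.
def _byt5_demo():
    from transformers import ByT5Tokenizer

    tokenizer = ByT5Tokenizer()
    ids = tokenizer("hi!").input_ids
    assert ids == [107, 108, 36, 1]  # 'h' (104+3), 'i' (105+3), '!' (33+3), </s>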
| 77 | 0 |
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sorts `collection` in place in ascending order."""
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubbles the element at `index - 1` forward until it is in order."""
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
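# Quick sanity check of the recursive sort above (illustrative values; the
# list is sorted in place, so the function itself returns None):
def _demo():
    sample = [5, 3, 1, 4, 2]
    rec_insertion_sort(sample, len(sample))
    assert sample == [1, 2, 3, 4, 5]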
| 481 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : List[str] = image_size
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : Union[str, Any] = embeddings_size
__UpperCAmelCase : Dict = hidden_sizes
__UpperCAmelCase : Dict = depths
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : str = num_labels
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : Dict = len(UpperCamelCase_)
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase : Dict = self.get_config()
return config, pixel_values
def a_ ( self : Dict):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_)
__UpperCAmelCase : Dict = model(UpperCamelCase_)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_)
__UpperCAmelCase : str = model(UpperCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Any = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase_ = False
lowercase_ = False
lowercase_ = False
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Tuple = FlaxRegNetModelTester(self)
__UpperCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_)
def a_ ( self : Dict):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a_ ( self : Tuple):
"""simple docstring"""
return
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_)
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_)
@unittest.skip(reason="RegNet does not use inputs_embeds")
def a_ ( self : Union[str, Any]):
"""simple docstring"""
pass
@unittest.skip(reason="RegNet does not support input and output embeddings")
def a_ ( self : Optional[int]):
"""simple docstring"""
pass
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Any = [*signature.parameters.keys()]
__UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_)
def a_ ( self : int):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]):
__UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_))
__UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : str = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1)
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_)
@jax.jit
def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]):
return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_)
with self.subTest("JIT Enabled"):
__UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
__UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple()
self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_))
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class a__ ( unittest.TestCase ):
@cached_property
def a_ ( self : Optional[int]):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
@slow
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
__UpperCAmelCase : Dict = self.default_image_processor
__UpperCAmelCase : str = prepare_img()
__UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np")
__UpperCAmelCase : Dict = model(**UpperCamelCase_)
# verify the logits
__UpperCAmelCase : Dict = (1, 1000)
self.assertEqual(outputs.logits.shape , UpperCamelCase_)
__UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
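# Standalone sketch mirroring the integration test above (not part of the test
# file; requires flax, PIL, and network access to the hub checkpoint):
def _example_inference(image_path: str = "cat.png"):  # any local image; path is illustrative
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image = Image.open(image_path)
    return model(**processor(images=image, return_tensors="np")).logits  # shape (1, 1000)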
| 77 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """Base class that every CLI subcommand implements."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
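# Hypothetical subcommand (not in the original file) showing how the two
# abstract hooks fit together. In practice `register_subcommand` receives the
# object returned by ArgumentParser.add_subparsers(), hence `add_parser` below;
# the "hello" command and its flag are made up for illustration.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        sub = parser.add_parser("hello", help="Print a greeting.")
        sub.add_argument("--name", default="world")
        sub.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self.name = name

    def run(self):
        print(f"hello {self.name}")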
| 635 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
A = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
A = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
A = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 77 | 0 |
import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys


def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im


@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original TrOCR weights into our VisionEncoderDecoderModel structure."""
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
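# Sanity-check sketch for a converted checkpoint (not part of the script);
# "microsoft/trocr-base-handwritten" is the published result of this
# conversion, and any local dump folder path works the same way.
def _demo_converted_model():
    processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
    model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
    pixel_values = processor(images=prepare_img("handwritten"), return_tensors="pt").pixel_values
    generated_ids = model.generate(pixel_values)
    print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])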
| 408 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A = logging.get_logger(__name__)
A = {"""vocab_file""": """spiece.model"""}
A = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
A = {"""bert_for_seq_generation""": 512}
class a__ ( __magic_name__ ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = []
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ):
"""simple docstring"""
__UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Dict = vocab_file
__UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCamelCase_)
@property
def a_ ( self : List[str]):
"""simple docstring"""
return self.sp_model.get_piece_size()
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.__dict__.copy()
__UpperCAmelCase : List[Any] = None
return state
def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def a_ ( self : Any , UpperCamelCase_ : str):
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_)
def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
return self.sp_model.piece_to_id(UpperCamelCase_)
def a_ ( self : Tuple , UpperCamelCase_ : int):
"""simple docstring"""
__UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_)
return token
def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : int = []
__UpperCAmelCase : Tuple = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase_) + token
__UpperCAmelCase : List[Any] = []
else:
current_sub_tokens.append(UpperCamelCase_)
out_string += self.sp_model.decode(UpperCamelCase_)
return out_string.strip()
def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__UpperCAmelCase : Tuple = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCamelCase_ , "wb") as fi:
__UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_)
return (out_vocab_file,)
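# Usage sketch (not part of the original module); the checkpoint name comes
# from the PRETRAINED_VOCAB_FILES_MAP above and downloads spiece.model:
def _bert_generation_demo():
    from transformers import BertGenerationTokenizer

    tokenizer = BertGenerationTokenizer.from_pretrained(
        "google/bert_for_seq_generation_L-24_bbc_encoder"
    )
    ids = tokenizer("Hello world").input_ids
    print(tokenizer.decode(ids))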
| 77 | 0 |
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : List[Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
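# Note on the pattern repeated throughout this file: every placeholder class
# pairs the DummyObject metaclass with a backends attribute (obfuscated to `a`
# above), so that using the symbol without torch installed raises a helpful
# ImportError instead of a NameError. A hypothetical instance of the pattern:
#
# class MyTorchOnlyModel(metaclass=DummyObject):
#     _backends = ["torch"]
#
#     def __init__(self, *args, **kwargs):
#         requires_backends(self, ["torch"])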
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Union[str, Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : int =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : int =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : List[Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Any =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Union[str, Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Any =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : int =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Dict =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Union[str, Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
def snake_case (*UpperCAmelCase__ , **UpperCAmelCase__ ) -> Optional[int]:
requires_backends(UpperCAmelCase__ , ['torch'] )
def snake_case (*UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[Any]:
requires_backends(UpperCAmelCase__ , ['torch'] )
def snake_case (*UpperCAmelCase__ , **UpperCAmelCase__ ) -> Optional[int]:
requires_backends(UpperCAmelCase__ , ['torch'] )
def snake_case (*UpperCAmelCase__ , **UpperCAmelCase__ ) -> Dict:
requires_backends(UpperCAmelCase__ , ['torch'] )
def snake_case (*UpperCAmelCase__ , **UpperCAmelCase__ ) -> Optional[int]:
requires_backends(UpperCAmelCase__ , ['torch'] )
def snake_case (*UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[Any]:
requires_backends(UpperCAmelCase__ , ['torch'] )
def snake_case (*UpperCAmelCase__ , **UpperCAmelCase__ ) -> List[Any]:
requires_backends(UpperCAmelCase__ , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : int =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : str =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Optional[int] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Any =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Dict =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : int =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : int =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Union[str, Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Optional[Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Dict =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Optional[Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Tuple =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Dict =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Dict =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : List[str] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Optional[int] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : List[str] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Any =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : int =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Union[str, Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Tuple =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : List[Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Tuple =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Optional[int] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Dict =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : str =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Optional[int] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Union[str, Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Tuple =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Optional[Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : int =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Union[str, Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Union[str, Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Optional[Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : List[Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Optional[Any] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : List[str] =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : Dict =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
class _lowerCAmelCase( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
a : int =['''torch''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(self , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def _a ( cls , *_lowerCamelCase , **_lowerCamelCase ):
requires_backends(cls , ['torch'] )
| 57 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE: the target of this assignment was lost in the renaming pass; setting
# TRANSFORMERS_NO_ADVISORY_WARNINGS is an assumption consistent with the value "true".
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 77 | 0 |
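The script above exercises Accelerate's gather_for_metrics, which both all-gathers per-process tensors and drops the samples Accelerate duplicated so the last uneven batch divides across processes. A minimal sketch of the evaluation pattern under test (model, dataloader, accelerator, and metric are assumed to be already prepared; the names are illustrative):

# Minimal sketch of distributed evaluation with gather_for_metrics.
model.eval()
for batch in dataloader:
    with torch.no_grad():
        logits = model(**batch).logits
    preds = logits.argmax(dim=-1)
    # gathers across processes AND trims padding duplicates from the last batch
    preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
    metric.add_batch(predictions=preds, references=refs)
result = metric.compute()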
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE: the target of this assignment was lost in the renaming pass; setting
# TRANSFORMERS_NO_ADVISORY_WARNINGS is an assumption consistent with the value "true".
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 267 |
"""simple docstring"""
import math
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 0 , UpperCamelCase = 0 ) -> list:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = end or len(UpperCamelCase )
for i in range(UpperCamelCase , UpperCamelCase ):
__UpperCAmelCase : List[Any] = i
__UpperCAmelCase : Any = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
__UpperCAmelCase : Dict = array[temp_index - 1]
temp_index -= 1
__UpperCAmelCase : str = temp_index_value
return array
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> None: # Max Heap
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = index
__UpperCAmelCase : List[str] = 2 * index + 1 # Left Node
__UpperCAmelCase : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
__UpperCAmelCase : Tuple = left_index
if right_index < heap_size and array[largest] < array[right_index]:
__UpperCAmelCase : int = right_index
if largest != index:
__UpperCAmelCase , __UpperCAmelCase : List[str] = array[largest], array[index]
heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def _UpperCamelCase ( UpperCamelCase ) -> list:
"""simple docstring"""
__UpperCAmelCase : List[Any] = len(UpperCamelCase )
for i in range(n // 2 , -1 , -1 ):
heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase )
for i in range(n - 1 , 0 , -1 ):
__UpperCAmelCase , __UpperCAmelCase : int = array[0], array[i]
heapify(UpperCamelCase , 0 , UpperCamelCase )
return array
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = low
__UpperCAmelCase : List[str] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = array[j], array[i]
i += 1
def _UpperCamelCase ( UpperCamelCase ) -> list:
"""simple docstring"""
if len(UpperCamelCase ) == 0:
return array
__UpperCAmelCase : Optional[int] = 2 * math.ceil(math.loga(len(UpperCamelCase ) ) )
__UpperCAmelCase : List[Any] = 16
return intro_sort(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase , UpperCamelCase )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list:
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(UpperCamelCase )
max_depth -= 1
__UpperCAmelCase : List[Any] = median_of_a(UpperCamelCase , UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 )
__UpperCAmelCase : Union[str, Any] = partition(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
intro_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Optional[Any] = p
return insertion_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
A = input("""Enter numbers separated by a comma : """).strip()
A = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 77 | 0 |
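A couple of sanity checks (my own additions, not part of the original module) for the sort entry point: quicksort recursion is capped at 2 * ceil(log2(n)) levels before falling back to heapsort, and sub-ranges of at most 16 elements are handed to insertion sort.

# Quick sanity checks for the introsort entry point defined above.
assert sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) == \
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
assert sort([]) == []
assert sort([5]) == [5]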
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->int:
return values.split(''',''' )
SCREAMING_SNAKE_CASE__ : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
SCREAMING_SNAKE_CASE__ : List[Any] =parser.parse_args()
SCREAMING_SNAKE_CASE__ : List[Any] =args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
SCREAMING_SNAKE_CASE__ : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
SCREAMING_SNAKE_CASE__ : int =extract_warnings(args.output_dir, args.targets)
SCREAMING_SNAKE_CASE__ : List[Any] =sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 434 |
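The parser above relies on pytest's warnings-summary layout: a warning begins at a non-indented location line, and indented continuation lines belong to the same warning. A small illustration of that buffering logic with invented input (file names and messages are made up for the example):

# Indented lines are buffered until the next non-indented line, then joined
# and matched against the targets, mirroring parse_line above.
sample = [
    "tests/test_foo.py::test_bar",
    "  /src/foo.py:10: DeprecationWarning: thing is deprecated",
    "    warnings.warn('thing is deprecated')",
    "tests/test_foo.py::test_baz",  # this header flushes the buffered warning
]
targets = ["DeprecationWarning"]
buffer, selected = [], set()
for line in sample:
    if not line.startswith(" "):
        if buffer:
            warning = "\n".join(buffer)
            if any(f": {x}: " in warning for x in targets):
                selected.add(warning)
            buffer.clear()
    else:
        buffer.append(line.strip())
assert len(selected) == 1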
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform max pooling on a square 2D matrix (image) with the given window size and stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform average pooling on a square 2D matrix (image) with the given window size and stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 77 | 0 |
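A worked example for the two pooling routines (my own check, not from the original module): with a 4x4 input, size=2 and stride=2 tile the matrix into four non-overlapping 2x2 windows.

import numpy as np

arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
print(maxpooling(arr, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(arr, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]  (window averages 3.5, 5.5, 11.5, 13.5 truncated by int())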
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 182 |
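All processors in the suite above share one calling convention, (input_ids, scores, cur_len) -> scores, which is what lets FlaxLogitsProcessorList fold an arbitrary chain into a single jittable call. A toy sketch of that chaining (my own example; assumes the imports at the top of the test file):

# Toy example of chaining warpers the way FlaxLogitsProcessorList does.
scores = jnp.log(jnp.array([[0.05, 0.15, 0.3, 0.5]]))
chain = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(2)])
warped = chain(None, scores, cur_len=None)  # all but the top-2 logits become -inf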
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 77 | 0 |
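One consequence of the offset bookkeeping above: when no additional_special_tokens are passed, the else-branch manufactures the sentence mask plus the <unk_2> … <unk_102> placeholders, 102 additional special tokens in all. A quick derivation (my own check, using only the code above):

offset = 103
additional = ["<mask_1>"] + [f"<unk_{i}>" for i in range(2, offset)]
assert len(additional) == 102  # 1 sentence-mask token + 101 <unk_i> placeholders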
import os
import sys
import transformers
# NOTE: the target of this assignment was lost in the renaming pass; silencing
# TensorFlow's C++ logging via TF_CPP_MIN_LOG_LEVEL is an assumption consistent with the value "3".
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
| 491 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 77 | 0 |
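A typical invocation of the converter above (paths are placeholders for this illustration). Note that the tokenizer vocab is located by stripping the 10-character "model.ckpt" suffix from the checkpoint path, so vocab.txt must sit next to the checkpoint:

# Example invocation (placeholder paths):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path tapas_wtq/model.ckpt \
#       --tapas_config_file tapas_wtq/bert_config.json \
#       --pytorch_dump_path tapas_wtq_pytorch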
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 79 |
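The (1, 80, 3000) shape asserted in test_integration follows directly from WhisperFeatureExtractor's defaults; the arithmetic below quotes those defaults from memory, so treat the numbers as assumptions rather than part of the test file:

chunk_length = 30       # seconds per window (default)
sampling_rate = 16_000  # Hz (default)
hop_length = 160        # samples between frames (default)
n_samples = chunk_length * sampling_rate  # 480_000 samples
nb_max_frames = n_samples // hop_length   # 3_000 frames
feature_size = 80                         # mel bins -> (batch, 80, 3000)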
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to fp16, optionally writing to a new path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
| 77 | 0 |
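Because the converter is exposed through fire.Fire, it can be driven from the command line or imported directly; halving the dtype roughly halves checkpoint size at the cost of precision. Usage sketch (the script and file names are illustrative):

# CLI (via fire):  python convert_model_to_fp16.py pytorch_model.bin --save_path model_fp16.bin
# or from Python:
convert("pytorch_model.bin", save_path="model_fp16.bin")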
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 |
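The _LazyModule indirection above defers heavy imports until an attribute is first touched. The same effect can be sketched with PEP 562's module-level __getattr__; this is a simplified stand-in for illustration, not the transformers implementation:

import importlib

_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):  # PEP 562: called only when `name` isn't found normally
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")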
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("""sample_data.csv""", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="""mean_squared_error""", optimizer="""adam""")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
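    # A possible next step (not part of the original script): predictions are still in
    # the scaler's [0, 1] range, so keep a reference to the fitted MinMaxScaler and
    # apply scaler.inverse_transform to both pred and y_test before plotting them.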
| 77 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["MaskFormerFeatureExtractor"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
    _import_structure["modeling_maskformer_swin"] = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 205 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class a__ ( __magic_name__ , unittest.TestCase ):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def a_ ( self : str):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test")
self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_)
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_)
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def a_ ( self : Dict):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_)
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_)
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_)
__UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
__UpperCAmelCase : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_)
# Checks everything loads correctly in the same way
__UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase_)
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
__UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_)
__UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_)
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_)
# Checks everything loads correctly in the same way
__UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_))
shutil.rmtree(UpperCamelCase_)
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase : Tuple = tempfile.mkdtemp()
__UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
__UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_))
shutil.rmtree(UpperCamelCase_)
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
lowercase_ = "facebook/mbart-large-en-ro"
lowercase_ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
lowercase_ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
lowercase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def a_ ( cls : int):
"""simple docstring"""
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
def a_ ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020)
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_)
def a_ ( self : Optional[int]):
"""simple docstring"""
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids)
__UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
__UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_)
__UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_)
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , UpperCamelCase_)
__UpperCAmelCase : Tuple = 10
__UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , UpperCamelCase_)
self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_)
def a_ ( self : Any):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001])
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = tempfile.mkdtemp()
__UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_)
__UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_)
@require_torch
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt")
__UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
__UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_)
self.assertEqual((2, 14) , batch.input_ids.shape)
self.assertEqual((2, 14) , batch.attention_mask.shape)
__UpperCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE])
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt")
__UpperCAmelCase : Any = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt")
__UpperCAmelCase : int = targets["input_ids"]
__UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : int = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR")
self.assertEqual(
nested_simplify(UpperCamelCase_) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 250004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
} , )
| 77 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
_a = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__(self, vocab_file, do_lower_case=False, remove_space=False, keep_accents=False, pad_token=None, unk_token=None, eos_token=None, bos_token=None, sp_model_kwargs=None, **kwargs):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored''' )
            name_or_path = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '''<|endoftext|>''' if eos_token is None else eos_token
        unk_token = '''<unk>''' if unk_token is None else unk_token
        if '''gpt-sw3-7b''' in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '''<pad>''' if pad_token is None else pad_token
            bos_token = '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.non_printing_characters_re.sub('''''' , UpperCamelCase_ )
# Normalize whitespaces
lowerCamelCase__ = "".join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
lowerCamelCase__ = unicodedata.normalize('''NFC''' , UpperCamelCase_ )
return text
def __lowerCamelCase ( self , __lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.preprocess_text(UpperCamelCase_ )
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(UpperCamelCase_ )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(UpperCamelCase_ )
@staticmethod
def __lowerCamelCase ( __lowerCAmelCase ):
'''simple docstring'''
return out_string
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = ""
lowerCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
lowerCamelCase__ = True
lowerCamelCase__ = []
else:
current_sub_tokens.append(UpperCamelCase_ )
lowerCamelCase__ = False
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = False ):
'''simple docstring'''
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase__ = self.preprocess_text(UpperCamelCase_ )
lowerCamelCase__ = self.sp_model.encode(UpperCamelCase_ )
else:
lowerCamelCase__ = [self.preprocess_text(UpperCamelCase_ ) for t in text]
lowerCamelCase__ = self.sp_model.encode(UpperCamelCase_ )
if return_tensors is True or return_tensors == "pt":
lowerCamelCase__ = torch.tensor(UpperCamelCase_ )
return token_ids
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.decode(UpperCamelCase_ )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
lowerCamelCase__ = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(UpperCamelCase_ ) + F'{self.bos_token}Bot:'
)
return self.encode(text=UpperCamelCase_ )
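# The conversation builder above serializes a chat as
# "<eos><bos>User: ...<bos>Bot: ...<bos>Bot:" before encoding: turns are joined with
# the BOS token and the string ends with a trailing "Bot:" as the generation prompt.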
| 481 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        """simple docstring"""
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        """simple docstring"""
        self.head = None

    def print_list(self):
        """simple docstring"""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        """simple docstring"""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        """simple docstring"""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Swap the data payloads; the node links themselves stay untouched.
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
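    # Expected output: "1 2 3 4 5" before the swap and "4 2 3 1 5" after, since
    # swap_nodes exchanges the payloads of the nodes holding 1 and 4.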
| 77 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
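    # Each train_x/test_x entry has shape (look_back, 1), so np.array yields the
    # (samples, timesteps, features) layout expected by the first LSTM layer's
    # input_shape=(look_back, 1).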
| 635 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
A = """
Human: <<task>>
Assistant: """
A = """huggingface-tools/default-prompts"""
A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]:
"""simple docstring"""
if prompt_or_repo_id is None:
__UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , UpperCamelCase ) is not None:
return prompt_or_repo_id
__UpperCAmelCase : str = cached_file(
UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(UpperCamelCase , "r" , encoding="utf-8" ) as f:
return f.read()
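# Usage sketch (hypothetical agent name; None falls back to the default prompts repo):
#   template = download_prompt(None, agent_name="my-agent", mode="chat")
#   prompt = template.replace("<<task>>", "Translate this sentence to French.")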
| 77 | 0 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
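# For n = 8 the backtracking search prints all 92 distinct placements, so the final
# line reports a solution count of 92.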
| 408 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ernie"""] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 77 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva

LABEL_DIR = ''
IMAGE_DIR = ''
OUTPUT_DIR = ''
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print('Processing...')
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(3_2)
        file_name = paths[index].split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(F'''/{file_root}.jpg''', image, [cva.IMWRITE_JPEG_QUALITY, 8_5])
        print(F'''Success {index+1}/{len(new_images)} with {file_name}''')
        annos_list = []
        for anno in new_annos[index]:
            obj = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(obj)
        with open(F'''/{file_root}.txt''', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, F'''{label_name}.jpg''')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
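# The labels use the YOLO format [class, x_center, y_center, width, height] with
# coordinates normalized to [0, 1], which is why a horizontal flip only needs
# x_center -> 1 - x_center and a vertical flip only needs y_center -> 1 - y_center.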
def random_chars(number_char: int = 3_2) -> str:
    assert number_char > 1, 'The number of characters should be greater than 1'
    letter_code = ascii_lowercase + digits
    return ''.join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
    print('DONE ✅')
 | 57 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class a__ ( nn.Module ):
def __init__( self : Union[str, Any]):
"""simple docstring"""
super().__init__()
__UpperCAmelCase : Optional[int] = nn.Linear(3 , 4)
__UpperCAmelCase : str = nn.BatchNormad(4)
__UpperCAmelCase : int = nn.Linear(4 , 5)
def a_ ( self : str , UpperCamelCase_ : List[str]):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_)))
class a__ ( unittest.TestCase ):
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , model.state_dict())
__UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json")
self.assertTrue(os.path.isfile(UpperCamelCase_))
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
__UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat")
self.assertTrue(os.path.isfile(UpperCamelCase_))
# TODO: add tests on the fact weights are properly loaded
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
__UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_)
with TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {})
__UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat")
self.assertTrue(os.path.isfile(UpperCamelCase_))
self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}})
__UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"])
self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_))
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ModelForTest()
__UpperCAmelCase : Optional[int] = model.state_dict()
__UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k}
__UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_)
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key]))
__UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k}
__UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_)
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key]))
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , UpperCamelCase_)
# Duplicates are removed
__UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_)
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key]))
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2}
__UpperCAmelCase : Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"])
self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2})
__UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
__UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"])
self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
| 77 | 0 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar("T")
class snake_case (Generic[T] ):
lowerCAmelCase__ :List[str] = 42 # Cache store of keys
lowerCAmelCase__ :List[Any] = 42 # References of the keys in cache
lowerCAmelCase__ :int = 10 # Maximum capacity of cache
def __init__( self ,UpperCAmelCase_ ) -> Any:
lowercase__ = deque()
lowercase__ = set()
if not n:
lowercase__ = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
lowercase__ = n
def _a ( self ,UpperCAmelCase_ ) -> List[str]:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowercase__ = self.dq_store.pop()
self.key_reference.remove(UpperCamelCase_ )
else:
self.dq_store.remove(UpperCamelCase_ )
self.dq_store.appendleft(UpperCamelCase_ )
self.key_reference.add(UpperCamelCase_ )
def _a ( self ) -> str:
for k in self.dq_store:
print(UpperCamelCase_ )
def __repr__( self ) -> Dict:
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
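    # Trace of the calls above (capacity 4): the deque front is most-recently-used, so
    # re-referring "A" moves it ahead of 2 and 3, and inserting 5 evicts 2 from the
    # right end, leaving [5, 4, 'A', 3] as the assertion checks.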
| 267 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    """simple docstring"""
    result = 1  # Keeps the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """simple docstring"""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """simple docstring"""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """simple docstring"""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
A = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
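# Worked example: for node_count = 5, binomial_coefficient(10, 5) = 252, so
# catalan_number(5) = 252 // 6 = 42 binary search trees, and
# binary_tree_count(5) = 42 * factorial(5) = 42 * 120 = 5040 binary trees.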
| 77 | 0 |
"""simple docstring"""
def combination_util(arr, n, r, index, data, i) -> None:
    if index == r:
        for j in range(r):
            print(data[j], end=''' ''')
        print(''' ''')
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r) -> None:
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
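    # Expected output: C(5, 3) = 10 combinations, from "10 20 30" through "30 40 50".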
| 434 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 77 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_A = get_logger(__name__)
_A = R"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class lowerCamelCase :
'''simple docstring'''
@add_start_docstrings(UpperCamelCase_ )
def __call__(self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class lowerCamelCase :
'''simple docstring'''
@add_start_docstrings(UpperCamelCase_ )
def __call__(self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@add_start_docstrings(UpperCamelCase_ )
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
for processor in self:
UpperCAmelCase__ : Any = inspect.signature(processor.__call__ ).parameters
if len(UpperCamelCase_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
F"""{processor.__class__} are passed to the logits processor.""" )
UpperCAmelCase__ : Union[str, Any] = processor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
else:
UpperCAmelCase__ : Optional[int] = processor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return scores
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or not (temperature > 0):
raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
UpperCAmelCase__ : Optional[Any] = temperature
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : str = scores / self.temperature
return scores
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase = -float("""Inf""" ) , _lowerCamelCase = 1 ):
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or (min_tokens_to_keep < 1):
raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
UpperCAmelCase__ : Optional[int] = top_p
UpperCAmelCase__ : List[Any] = filter_value
UpperCAmelCase__ : Tuple = min_tokens_to_keep
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = lax.top_k(UpperCamelCase_ , scores.shape[-1] )
UpperCAmelCase__ : int = jnp.full_like(UpperCamelCase_ , self.filter_value )
UpperCAmelCase__ : List[Any] = jax.nn.softmax(UpperCamelCase_ , axis=-1 ).cumsum(axis=-1 )
UpperCAmelCase__ : Tuple = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCAmelCase__ : Tuple = jnp.roll(UpperCamelCase_ , 1 )
score_mask |= score_mask.at[:, 0].set(UpperCamelCase_ )
# min tokens to keep
UpperCAmelCase__ : List[str] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = jnp.where(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = jax.lax.sort_key_val(UpperCamelCase_ , UpperCamelCase_ )[-1]
return next_scores
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase = -float("""Inf""" ) , _lowerCamelCase = 1 ):
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or top_k <= 0:
raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
UpperCAmelCase__ : List[str] = max(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : List[str] = filter_value
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = scores.shape
UpperCAmelCase__ : Union[str, Any] = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCAmelCase__ : Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCAmelCase__ : Optional[Any] = lax.top_k(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = jnp.broadcast_to((jnp.arange(UpperCamelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCAmelCase__ : str = topk_scores.flatten()
UpperCAmelCase__ : int = topk_indices.flatten() + shift
UpperCAmelCase__ : str = next_scores_flat.at[topk_indices_flat].set(UpperCamelCase_ )
UpperCAmelCase__ : Any = next_scores_flat.reshape(UpperCamelCase_ , UpperCamelCase_ )
return next_scores
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = bos_token_id
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = jnp.full(scores.shape , -float("""inf""" ) )
UpperCAmelCase__ : Optional[int] = 1 - jnp.bool_(cur_len - 1 )
UpperCAmelCase__ : List[Any] = jnp.where(UpperCamelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCamelCase_ )
return scores
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = max_length
UpperCAmelCase__ : Optional[Any] = eos_token_id
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = jnp.full(scores.shape , -float("""inf""" ) )
UpperCAmelCase__ : Dict = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCAmelCase__ : Any = jnp.where(UpperCamelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCamelCase_ )
return scores
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or min_length < 0:
raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or eos_token_id < 0:
raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
UpperCAmelCase__ : List[Any] = min_length
UpperCAmelCase__ : Tuple = eos_token_id
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
UpperCAmelCase__ : Optional[int] = jnp.where(UpperCamelCase_ , scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) , UpperCamelCase_ )
return scores
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = list(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = begin_index
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : str = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCAmelCase__ : str = jnp.where(UpperCamelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , UpperCamelCase_ )
return scores
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : str = list(UpperCamelCase_ )
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
return scores
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = dict(UpperCamelCase_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
UpperCAmelCase__ : int = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCAmelCase__ : Dict = force_token_array.at[index].set(UpperCamelCase_ )
UpperCAmelCase__ : str = jnp.intaa(UpperCamelCase_ )
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
def _force_token(_lowerCamelCase ):
UpperCAmelCase__ : Optional[Any] = scores.shape[0]
UpperCAmelCase__ : int = self.force_token_array[generation_idx]
UpperCAmelCase__ : List[Any] = jnp.ones_like(UpperCamelCase_ , dtype=scores.dtype ) * -float("""inf""" )
UpperCAmelCase__ : List[str] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
UpperCAmelCase__ : Dict = lax.dynamic_update_slice(UpperCamelCase_ , UpperCamelCase_ , (0, current_token) )
return new_scores
UpperCAmelCase__ : List[str] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCamelCase_ ) , lambda: scores , ) , )
return scores
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = generate_config.eos_token_id
UpperCAmelCase__ : List[str] = generate_config.no_timestamps_token_id
UpperCAmelCase__ : Dict = generate_config.no_timestamps_token_id + 1
UpperCAmelCase__ : Tuple = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(UpperCamelCase_ , """max_initial_timestamp_index""" ):
UpperCAmelCase__ : Optional[int] = generate_config.max_initial_timestamp_index
else:
UpperCAmelCase__ : str = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCAmelCase__ : Union[str, Any] = model_config.vocab_size
def __call__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
def handle_pairs(_lowerCamelCase , _lowerCamelCase ):
UpperCAmelCase__ : Dict = jnp.where((cur_len - self.begin_index) >= 1 , UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Dict = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCamelCase_ , )
UpperCAmelCase__ : str = jnp.where((cur_len - self.begin_index) < 2 , UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCamelCase_ , UpperCamelCase_ , )
return jnp.where(
UpperCamelCase_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , UpperCamelCase_ , )
UpperCAmelCase__ : List[Any] = jax.vmap(UpperCamelCase_ )(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Dict = jnp.where(cur_len == self.begin_index , UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCamelCase_ , )
UpperCAmelCase__ : Optional[Any] = self.timestamp_begin + self.max_initial_timestamp_index
UpperCAmelCase__ : str = jnp.where(
UpperCamelCase_ , scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) , UpperCamelCase_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCAmelCase__ : Dict = jax.nn.log_softmax(UpperCamelCase_ , axis=-1 )
def handle_cumulative_probs(_lowerCamelCase , _lowerCamelCase ):
UpperCAmelCase__ : int = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
UpperCAmelCase__ : int = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , UpperCamelCase_ , )
UpperCAmelCase__ : Optional[Any] = jax.vmap(UpperCamelCase_ )(UpperCamelCase_ , UpperCamelCase_ )
return scores
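# Taken together these classes mirror transformers' Flax logits processors: the list
# subclass applies its members in order, and every __call__ takes (input_ids, scores,
# cur_len) and returns scores of the same shape, so the transforms compose cleanly
# inside jit-compiled generation loops (hence lax.cond/jnp.where instead of Python
# branches on traced values).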
| 182 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
__UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18}
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : List[Any] = image_size
__UpperCAmelCase : str = min_resolution
__UpperCAmelCase : Tuple = max_resolution
__UpperCAmelCase : Optional[Any] = do_resize
__UpperCAmelCase : Any = size
__UpperCAmelCase : Any = do_normalize
__UpperCAmelCase : Any = image_mean
__UpperCAmelCase : Optional[Any] = image_std
def a_ ( self : str):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = ViTImageProcessor if is_vision_available() else None
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self)
@property
def a_ ( self : Union[str, Any]):
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCamelCase_ , "image_std"))
self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCamelCase_ , "do_resize"))
self.assertTrue(hasattr(UpperCamelCase_ , "size"))
def a_ ( self : Dict):
"""simple docstring"""
pass
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image)
# Test not batched input
__UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray)
# Test not batched input
__UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor)
# Test not batched input
__UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 77 | 0 |
def UpperCamelCase ( _A : int = 600851475143 ) -> int:
"""simple docstring"""
try:
A__ = int(_A )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
A__ = 1
A__ = 2
while i * i <= n:
while n % i == 0:
A__ = i
n //= i
i += 1
if n > 1:
A__ = n
return int(_A )
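# Illustrative check: the largest prime factor of 13195 is 29, since
# 13195 = 5 * 7 * 13 * 29; for the default Project Euler #3 input the
# trial-division loop above yields 6857.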
if __name__ == "__main__":
print(F'''{solution() = }''')
| 491 |
"""simple docstring"""
from collections import namedtuple
A = namedtuple("""from_to""", """from_ to""")
A = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1_000),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00454, 264.172),
"""cubicyard""": from_to(0.76455, 1.30795),
"""cubicfoot""": from_to(0.028, 35.3147),
"""cup""": from_to(0.000236588, 4226.75),
}
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float:
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
+ ", ".join(UpperCamelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
+ ", ".join(UpperCamelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
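# Illustrative conversions (assuming the original function name `volume_conversion`
# for the def above): the table routes every value through cubic metres, so
#     volume_conversion(4, "cubicmeter", "litre")  # -> 4000.0
#     volume_conversion(1, "litre", "gallon")      # -> 0.264172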
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 | 0 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int = 10_00 ) -> int:
'''simple docstring'''
return sum(e for e in range(3 , snake_case_ ) if e % 3 == 0 or e % 5 == 0 )
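# Worked example: for a limit of 10 the multiples of 3 or 5 below it are
# 3, 5, 6 and 9, which sum to 23 -- the classic Project Euler #1 sanity check.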
if __name__ == "__main__":
print(f"{solution() = }")
| 78 | '''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """perceiver"""
def __init__(self : Optional[int] , __a : Tuple=256 , __a : Optional[Any]=1280 , __a : Optional[int]=768 , __a : Any=1 , __a : List[str]=26 , __a : Dict=8 , __a : List[Any]=8 , __a : Tuple=None , __a : List[str]=None , __a : Optional[int]="kv" , __a : Union[str, Any]=1 , __a : List[str]=1 , __a : List[Any]="gelu" , __a : List[str]=0.1 , __a : str=0.02 , __a : List[str]=1E-12 , __a : Optional[int]=True , __a : Tuple=262 , __a : Dict=2048 , __a : int=56 , __a : Optional[int]=[368, 496] , __a : Any=16 , __a : Optional[Any]=1920 , __a : Any=16 , __a : str=[1, 16, 224, 224] , **__a : Any , ):
super().__init__(**__a )
UpperCAmelCase_ = num_latents
UpperCAmelCase_ = d_latents
UpperCAmelCase_ = d_model
UpperCAmelCase_ = num_blocks
UpperCAmelCase_ = num_self_attends_per_block
UpperCAmelCase_ = num_self_attention_heads
UpperCAmelCase_ = num_cross_attention_heads
UpperCAmelCase_ = qk_channels
UpperCAmelCase_ = v_channels
UpperCAmelCase_ = cross_attention_shape_for_attention
UpperCAmelCase_ = self_attention_widening_factor
UpperCAmelCase_ = cross_attention_widening_factor
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = use_query_residual
# masked language modeling attributes
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
# image classification attributes
UpperCAmelCase_ = image_size
# flow attributes
UpperCAmelCase_ = train_size
# multimodal autoencoding attributes
UpperCAmelCase_ = num_frames
UpperCAmelCase_ = audio_samples_per_frame
UpperCAmelCase_ = samples_per_patch
UpperCAmelCase_ = output_shape
class __A ( UpperCamelCase__ ):
@property
def _lowercase (self : Dict ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def _lowercase (self : Optional[Any] ):
return 1E-4
def _lowercase (self : Union[str, Any] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__a , __a ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = preprocessor.num_special_tokens_to_add(__a )
UpperCAmelCase_ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [" ".join(["a"] ) * seq_length] * batch_size
UpperCAmelCase_ = dict(preprocessor(__a , return_tensors=__a ) )
UpperCAmelCase_ = inputs.pop("input_ids" )
return inputs
elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase_ = self._generate_dummy_images(__a , __a , __a , __a )
UpperCAmelCase_ = dict(preprocessor(images=__a , return_tensors=__a ) )
UpperCAmelCase_ = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 78 | 1 |
'''simple docstring'''
from math import sqrt
def lowerCAmelCase_ ( snake_case_ : int ) -> bool:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and (
number >= 0
), "'number' must been an int and positive"
UpperCAmelCase_ = True
# 0 and 1 are none primes.
if number <= 1:
UpperCAmelCase_ = False
for divisor in range(2 , int(round(sqrt(snake_case_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
UpperCAmelCase_ = False
break
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'status' must been from type bool"
return status
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> List[Any]:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
UpperCAmelCase_ = list(range(2 , n + 1 ) )
UpperCAmelCase_ = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(snake_case_ ) ):
for j in range(i + 1 , len(snake_case_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
UpperCAmelCase_ = 0
# filters actual prime numbers.
UpperCAmelCase_ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and (n > 2), "'N' must been an int and > 2"
UpperCAmelCase_ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(snake_case_ ):
ans.append(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase_ ( snake_case_ : Any ) -> int:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and number >= 0, "'number' must been an int and >= 0"
UpperCAmelCase_ = [] # this list will be returns of the function.
# potential prime number factors.
UpperCAmelCase_ = 2
UpperCAmelCase_ = number
if number == 0 or number == 1:
ans.append(snake_case_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(snake_case_ ):
while quotient != 1:
if is_prime(snake_case_ ) and (quotient % factor == 0):
ans.append(snake_case_ )
quotient /= factor
else:
factor += 1
else:
ans.append(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type list"
return ans
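# Illustrative trace: for 20 the loop strips the factors 2, 2 and finally 5,
# so prime_factorization(20) -> [2, 2, 5].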
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> List[Any]:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
UpperCAmelCase_ = 0
# prime factorization of 'number'
UpperCAmelCase_ = prime_factorization(snake_case_ )
UpperCAmelCase_ = max(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type int"
return ans
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
UpperCAmelCase_ = 0
# prime factorization of 'number'
UpperCAmelCase_ = prime_factorization(snake_case_ )
UpperCAmelCase_ = min(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ), "'ans' must been from type int"
return ans
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> List[str]:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , snake_case_ ), "compare bust been from type bool"
return number % 2 == 0
def lowerCAmelCase_ ( snake_case_ : Dict ) -> Any:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , snake_case_ ), "compare bust been from type bool"
return number % 2 != 0
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Any:
'''simple docstring'''
assert (
isinstance(snake_case_ , snake_case_ ) and (number > 2) and is_even(snake_case_ )
), "'number' must been an int, even and > 2"
UpperCAmelCase_ = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
UpperCAmelCase_ = get_prime_numbers(snake_case_ )
UpperCAmelCase_ = len(snake_case_ )
# run variable for while-loops.
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
# exit variable. for break up the loops
UpperCAmelCase_ = True
while i < len_pn and loop:
UpperCAmelCase_ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
UpperCAmelCase_ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and (len(snake_case_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
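# Illustrative example (assuming the original name `goldbach` for this helper):
# for the even number 28 the first prime pair found is 5 + 23, so it returns [5, 23].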
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
assert (
isinstance(snake_case_ , snake_case_ )
and isinstance(snake_case_ , snake_case_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
UpperCAmelCase_ = 0
while numbera != 0:
UpperCAmelCase_ = numbera % numbera
UpperCAmelCase_ = numbera
UpperCAmelCase_ = rest
# precondition
assert isinstance(snake_case_ , snake_case_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
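# This is the iterative Euclidean algorithm: replace (a, b) by (b, a mod b) until
# the remainder is 0. E.g. gcd(76, 28): 76 % 28 = 20, 28 % 20 = 8, 20 % 8 = 4,
# 8 % 4 = 0, so the result is 4.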
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Tuple ) -> Tuple:
'''simple docstring'''
assert (
isinstance(snake_case_ , snake_case_ )
and isinstance(snake_case_ , snake_case_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
UpperCAmelCase_ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
UpperCAmelCase_ = prime_factorization(snake_case_ )
UpperCAmelCase_ = prime_factorization(snake_case_ )
elif numbera == 1 or numbera == 1:
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = max(snake_case_ , snake_case_ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
UpperCAmelCase_ = prime_fac_a.count(snake_case_ )
UpperCAmelCase_ = prime_fac_a.count(snake_case_ )
for _ in range(max(snake_case_ , snake_case_ ) ):
ans *= n
else:
UpperCAmelCase_ = prime_fac_a.count(snake_case_ )
for _ in range(snake_case_ ):
ans *= n
done.append(snake_case_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
UpperCAmelCase_ = prime_fac_a.count(snake_case_ )
for _ in range(snake_case_ ):
ans *= n
done.append(snake_case_ )
# precondition
assert isinstance(snake_case_ , snake_case_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and (n >= 0), "'number' must been a positive int"
UpperCAmelCase_ = 0
UpperCAmelCase_ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(snake_case_ ):
ans += 1
# precondition
assert isinstance(snake_case_ , snake_case_ ) and is_prime(
snake_case_ ), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Tuple ) -> List[str]:
'''simple docstring'''
assert (
is_prime(snake_case_ ) and is_prime(snake_case_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
UpperCAmelCase_ = p_number_a + 1 # jump to the next number
UpperCAmelCase_ = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(snake_case_ ):
number += 1
while number < p_number_a:
ans.append(snake_case_ )
number += 1
# fetch the next prime number.
while not is_prime(snake_case_ ):
number += 1
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and ans[0] != p_number_a
and ans[len(snake_case_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and (n >= 1), "'n' must been int and >= 1"
UpperCAmelCase_ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(snake_case_ )
# precondition
assert ans[0] == 1 and ans[len(snake_case_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> str:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and (
number > 1
), "'number' must been an int and >= 1"
UpperCAmelCase_ = get_divisors(snake_case_ )
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and (divisors[0] == 1)
and (divisors[len(snake_case_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
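# Perfect-number examples: 6 (1 + 2 + 3) and 28 (1 + 2 + 4 + 7 + 14) return True,
# while 12 returns False since 1 + 2 + 3 + 4 + 6 = 16 != 12.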
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
assert (
isinstance(snake_case_ , snake_case_ )
and isinstance(snake_case_ , snake_case_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
UpperCAmelCase_ = gcd(abs(snake_case_ ) , abs(snake_case_ ) )
# precondition
assert (
isinstance(snake_case_ , snake_case_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
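# E.g. (assuming the original name `simplify_fraction`): simplify_fraction(10, 20)
# divides both terms by gcd(10, 20) = 10 and returns (1, 2).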
def lowerCAmelCase_ ( snake_case_ : int ) -> List[str]:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and (n >= 0), "'n' must been a int and >= 0"
UpperCAmelCase_ = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ) and (n >= 0), "'n' must been an int and >= 0"
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
UpperCAmelCase_ = 1 # this will be return
for _ in range(n - 1 ):
UpperCAmelCase_ = ans
ans += fiba
UpperCAmelCase_ = tmp
return ans
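# Iterative Fibonacci: each pass adds the previous term into the running answer,
# so successive calls with n = 0, 1, 2, 3, 4, 5 return 1, 1, 2, 3, 5, 8.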
| 78 | '''simple docstring'''
import requests
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) -> None:
'''simple docstring'''
UpperCAmelCase_ = {"Content-Type": "application/json"}
UpperCAmelCase_ = requests.post(snake_case_ , json={"text": message_body} , headers=snake_case_ )
if response.status_code != 2_00:
UpperCAmelCase_ = (
"Request to slack returned an error "
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(snake_case_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 78 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[str] ={
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __A ( UpperCamelCase__ ):
a__ : Optional[Any] = """sew-d"""
def __init__(self : Union[str, Any] , __a : int=32 , __a : int=768 , __a : int=12 , __a : str=12 , __a : List[Any]=3072 , __a : Optional[Any]=2 , __a : Optional[Any]=512 , __a : Tuple=256 , __a : Optional[int]=True , __a : int=True , __a : Dict=("p2c", "c2p") , __a : Dict="layer_norm" , __a : Tuple="gelu_python" , __a : List[str]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.0 , __a : Any=0.1 , __a : Any=0.02 , __a : int=1E-7 , __a : Tuple=1E-5 , __a : Union[str, Any]="group" , __a : str="gelu" , __a : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __a : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a : int=False , __a : List[Any]=128 , __a : List[Any]=16 , __a : Optional[Any]=True , __a : Dict=0.05 , __a : List[Any]=10 , __a : Any=2 , __a : Tuple=0.0 , __a : Dict=10 , __a : Tuple=0 , __a : Optional[Any]="mean" , __a : Optional[int]=False , __a : Dict=False , __a : Any=256 , __a : str=0 , __a : Optional[Any]=1 , __a : Any=2 , **__a : Any , ):
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = feat_extract_norm
UpperCAmelCase_ = feat_extract_activation
UpperCAmelCase_ = list(__a )
UpperCAmelCase_ = list(__a )
UpperCAmelCase_ = list(__a )
UpperCAmelCase_ = conv_bias
UpperCAmelCase_ = num_conv_pos_embeddings
UpperCAmelCase_ = num_conv_pos_embedding_groups
UpperCAmelCase_ = len(self.conv_dim )
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = squeeze_factor
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = position_buckets
UpperCAmelCase_ = share_att_key
UpperCAmelCase_ = relative_attention
UpperCAmelCase_ = norm_rel_ebd
UpperCAmelCase_ = list(__a )
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = feat_proj_dropout
UpperCAmelCase_ = final_dropout
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = feature_layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ = apply_spec_augment
UpperCAmelCase_ = mask_time_prob
UpperCAmelCase_ = mask_time_length
UpperCAmelCase_ = mask_time_min_masks
UpperCAmelCase_ = mask_feature_prob
UpperCAmelCase_ = mask_feature_length
UpperCAmelCase_ = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ = ctc_loss_reduction
UpperCAmelCase_ = ctc_zero_infinity
# sequence classification
UpperCAmelCase_ = use_weighted_layer_sum
UpperCAmelCase_ = classifier_proj_size
@property
def _lowercase (self : str ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 78 | '''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name
class __A ( UpperCamelCase__ ):
def __init__(self : Any , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = 1
UpperCAmelCase_ = FrozenDict(__a )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = True
UpperCAmelCase_ = FrozenDict(__a )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , )
def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def _lowercase (self : int ):
self.enable_attention_slicing(__a )
def _lowercase (self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase (self : Optional[int] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__(self : Dict , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : str , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ):
UpperCAmelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
UpperCAmelCase_ = self.segmentation_model(**__a )
UpperCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase_ = self.numpy_to_pil(__a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCAmelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
| 78 | 1 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( snake_case_ : int ) -> str:
'''simple docstring'''
def is_in_circle(snake_case_ : float , snake_case_ : float ) -> bool:
UpperCAmelCase_ = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
UpperCAmelCase_ = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(snake_case_ ) )
# The ratio of the area for circle to square is pi/4.
UpperCAmelCase_ = proportion * 4
print(f"""The estimated value of pi is {pi_estimate}""" )
print(f"""The numpy value of pi is {pi}""" )
print(f"""The total error is {abs(pi - pi_estimate )}""" )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Callable[[float], float] , snake_case_ : float = 0.0 , snake_case_ : float = 1.0 , ) -> float:
'''simple docstring'''
return mean(
function_to_integrate(uniform(snake_case_ , snake_case_ ) ) for _ in range(snake_case_ ) ) * (max_value - min_value)
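# Mean-value Monte Carlo integration: E[f(U)] for U ~ Uniform(min, max) equals
# the integral divided by (max - min), so the sample mean times (max - min) is
# an unbiased estimate of the integral.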
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : float = 0.0 , snake_case_ : float = 1.0 ) -> None:
'''simple docstring'''
def identity_function(snake_case_ : float ) -> float:
return x
UpperCAmelCase_ = area_under_curve_estimator(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {expected_value}""" )
print(f"""Total error is {abs(estimated_value - expected_value )}""" )
print("******************" )
def lowerCAmelCase_ ( snake_case_ : int ) -> None:
'''simple docstring'''
def function_to_integrate(snake_case_ : float ) -> float:
return sqrt(4.0 - x * x )
UpperCAmelCase_ = area_under_curve_estimator(
snake_case_ , snake_case_ , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {pi}""" )
print(f"""Total error is {abs(estimated_value - pi )}""" )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | '''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int ) -> bool:
'''simple docstring'''
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
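# Bit trick: a positive power of two has a single set bit, so subtracting 1 turns
# it into all lower bits and the AND comes out 0 (note that 0 also passes this test).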
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int ) -> None:
'''simple docstring'''
UpperCAmelCase_ = generate_pascal_triangle(snake_case_ )
for row_idx in range(snake_case_ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def lowerCAmelCase_ ( snake_case_ : int ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
UpperCAmelCase_ = []
for current_row_idx in range(snake_case_ ):
UpperCAmelCase_ = populate_current_row(snake_case_ , snake_case_ )
triangle.append(snake_case_ )
return triangle
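# Illustrative output: generate_pascal_triangle(3) -> [[1], [1, 1], [1, 2, 1]].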
def lowerCAmelCase_ ( snake_case_ : list[list[int]] , snake_case_ : int ) -> list[int]:
'''simple docstring'''
UpperCAmelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
UpperCAmelCase_ , UpperCAmelCase_ = 1, 1
for current_col_idx in range(1 , snake_case_ ):
calculate_current_element(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
return current_row
def lowerCAmelCase_ ( snake_case_ : list[list[int]] , snake_case_ : list[int] , snake_case_ : int , snake_case_ : int , ) -> None:
'''simple docstring'''
UpperCAmelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
UpperCAmelCase_ = triangle[current_row_idx - 1][current_col_idx]
UpperCAmelCase_ = above_to_left_elt + above_to_right_elt
def lowerCAmelCase_ ( snake_case_ : int ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
UpperCAmelCase_ = [[1]]
for row_index in range(1 , snake_case_ ):
UpperCAmelCase_ = [0] + result[-1] + [0]
UpperCAmelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
UpperCAmelCase_ = sum(divmod(snake_case_ , 2 ) )
UpperCAmelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
UpperCAmelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
UpperCAmelCase_ = row_first_half + row_second_half
result.append(snake_case_ )
return result
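# The optimized variant exploits row symmetry: it computes only the first
# ceil(row_length / 2) entries via divmod and mirrors them, roughly halving
# the additions per row compared with the element-wise builder above.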
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(snake_case_ : Callable , snake_case_ : int ) -> None:
UpperCAmelCase_ = f"""{func.__name__}({value})"""
UpperCAmelCase_ = timeit(f"""__main__.{call}""" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(snake_case_ , snake_case_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 78 | '''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __A :
a__ : int
a__ : TreeNode | None = None
a__ : TreeNode | None = None
SCREAMING_SNAKE_CASE_: Union[str, Any] =namedtuple('CoinsDistribResult', 'moves excess')
def lowerCAmelCase_ ( snake_case_ : TreeNode | None ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(snake_case_ : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(snake_case_ : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(snake_case_ ) != count_coins(snake_case_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(snake_case_ : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase_ , UpperCAmelCase_ = get_distrib(node.left )
UpperCAmelCase_ , UpperCAmelCase_ = get_distrib(node.right )
UpperCAmelCase_ = 1 - left_distrib_excess
UpperCAmelCase_ = 1 - right_distrib_excess
UpperCAmelCase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(snake_case_ )
+ abs(snake_case_ )
)
UpperCAmelCase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(snake_case_ , snake_case_ )
return get_distrib(snake_case_ )[0]
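# Illustrative example (the traversal itself is `get_distrib` above; the public
# name `distribute_coins` is assumed from the original source): a root
# TreeNode(3, TreeNode(0), TreeNode(0)) needs 2 moves, one coin to each child.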
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = FileLock(str(tmpdir / "foo.lock" ) )
UpperCAmelCase_ = FileLock(str(tmpdir / "foo.lock" ) )
UpperCAmelCase_ = 0.01
with locka.acquire():
with pytest.raises(snake_case_ ):
UpperCAmelCase_ = time.time()
locka.acquire(snake_case_ )
assert time.time() - _start > timeout
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = "a" * 10_00 + ".lock"
UpperCAmelCase_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(snake_case_ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
UpperCAmelCase_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case_ ):
locka.acquire(0 )
| 78 | '''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_: int =logging.getLogger()
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCAmelCase_ = parser.parse_args()
return args.f
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = os.path.join(snake_case_ , "all_results.json" )
if os.path.exists(snake_case_ ):
with open(snake_case_ , "r" ) as f:
UpperCAmelCase_ = json.load(snake_case_ )
else:
raise ValueError(f"""can't find {path}""" )
return results
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
SCREAMING_SNAKE_CASE_: Any =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
@classmethod
def _lowercase (cls : Any ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase_ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _lowercase (cls : int ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertLess(result["perplexity"] , 100 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : str ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__a , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_rouge1"] , 10 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_bleu"] , 30 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "translation_no_trainer" ) ) )
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__a , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "image_classification_no_trainer" ) ) )
| 78 | 1 |
'''simple docstring'''
import json
import sys
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple ) -> List[str]:
'''simple docstring'''
with open(snake_case_ , encoding="utf-8" ) as f:
UpperCAmelCase_ = json.load(snake_case_ )
UpperCAmelCase_ = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(snake_case_ ):
UpperCAmelCase_ = results[benchmark_name]
UpperCAmelCase_ = benchmark_name.split("/" )[-1]
output_md.append(f"""### Benchmark: {benchmark_file_name}""" )
UpperCAmelCase_ = "| metric |"
UpperCAmelCase_ = "|--------|"
UpperCAmelCase_ = "| new / old (diff) |"
for metric_name in sorted(snake_case_ ):
UpperCAmelCase_ = benchmark_res[metric_name]
UpperCAmelCase_ = metric_vals["new"]
UpperCAmelCase_ = metric_vals.get("old" , snake_case_ )
UpperCAmelCase_ = metric_vals.get("diff" , snake_case_ )
UpperCAmelCase_ = f""" {new_val:f}""" if isinstance(snake_case_ , (int, float) ) else "None"
if old_val is not None:
val_str += f""" / {old_val:f}""" if isinstance(snake_case_ , (int, float) ) else "None"
if dif_val is not None:
val_str += f""" ({dif_val:f})""" if isinstance(snake_case_ , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>" )
with open(snake_case_ , "w" , encoding="utf-8" ) as f:
f.writelines("\n".join(snake_case_ ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[Any] =sys.argv[1]
SCREAMING_SNAKE_CASE_: Dict =sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 78 | '''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
SCREAMING_SNAKE_CASE_: Any =False
try:
SCREAMING_SNAKE_CASE_: Optional[Any] =_is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class __A :
def __init__(self : int , __a : str = None , __a : list = [] ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = choices
UpperCAmelCase_ = prompt
if sys.platform == "win32":
UpperCAmelCase_ = "*"
else:
UpperCAmelCase_ = "➔ "
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __a )
else:
forceWrite(self.choices[index] , __a )
def _lowercase (self : Any , __a : int ):
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(__a )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def _lowercase (self : Optional[Any] , __a : Direction , __a : int = 1 ):
UpperCAmelCase_ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a )
move_cursor(__a , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def _lowercase (self : Dict ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def _lowercase (self : Any ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def _lowercase (self : Optional[Any] ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def _lowercase (self : str ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__a )] for number in range(10 )] )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = int(chr(self.current_selection ) )
UpperCAmelCase_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __a )
else:
return
else:
return
def _lowercase (self : Optional[Any] , __a : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
UpperCAmelCase_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__a )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
UpperCAmelCase_ = int(builtins.input() )
except ValueError:
UpperCAmelCase_ = default_choice
else:
UpperCAmelCase_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(__a , "\n" )
return choice
| 78 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_: int ='\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_: Any ='\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_: List[Any] ='\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def _lowercase (self : List[Any] ):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _lowercase (self : List[Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : int = CHRF.CHAR_ORDER , __a : int = CHRF.WORD_ORDER , __a : int = CHRF.BETA , __a : bool = False , __a : bool = False , __a : bool = False , ):
UpperCAmelCase_ = len(references[0] )
if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
UpperCAmelCase_ = [[refs[i] for refs in references] for i in range(references_per_prediction )]
UpperCAmelCase_ = CHRF(__a , __a , __a , __a , __a , __a )
UpperCAmelCase_ = sb_chrf.corpus_score(__a , __a )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
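# Sketch of the reference transposition performed in `_compute` above: this
# metric takes one sub-list of references per prediction, while sacrebleu's
# CHRF wants one list per reference stream, so the nesting is flipped
# (all predictions must have the same number of references):
#
#     refs = [["ref a1", "ref a2"], ["ref b1", "ref b2"]]  # per prediction
#     transposed = [[r[i] for r in refs] for i in range(len(refs[0]))]
#     # -> [["ref a1", "ref b1"], ["ref a2", "ref b2"]]    # per stream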
| 78 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_: Optional[int] ={'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Optional[int] =['BeitFeatureExtractor']
SCREAMING_SNAKE_CASE_: int =['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Optional[int] =[
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: int =[
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
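# Usage note for the `_LazyModule` pattern above: importing the package stays
# cheap, and backend-heavy submodules are only loaded on first attribute
# access, e.g. (illustrative):
#
#     import transformers
#     transformers.BeitConfig  # resolves from configuration_beit only
#     transformers.BeitModel   # first access triggers the torch-backed import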
| 78 | 1 |
'''simple docstring'''
from math import pi, sqrt, tan
def lowerCAmelCase_ ( snake_case_ : float ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCAmelCase_ ( snake_case_ : float ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def lowerCAmelCase_ ( snake_case_ : float ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
UpperCAmelCase_ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(snake_case_ , 2 ) * torus_radius * tube_radius
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def lowerCAmelCase_ ( snake_case_ : float ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle" )
UpperCAmelCase_ = (sidea + sidea + sidea) / 2
UpperCAmelCase_ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
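# Worked check of Heron's formula above: for the 3-4-5 right triangle the
# semi-perimeter is s = (3 + 4 + 5) / 2 = 6, so the area is
# sqrt(6 * 3 * 2 * 1) = 6.0, matching the familiar (3 * 4) / 2.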
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (basea + basea) * height
def lowerCAmelCase_ ( snake_case_ : float ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : float ) -> float:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 78 | '''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
SCREAMING_SNAKE_CASE_: Any ={
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCAmelCase_ ( snake_case_ : Any ) -> str:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase_ = False
elif args.student_type == "gpt2":
UpperCAmelCase_ = False
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) -> Tuple:
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase_ = False
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=snake_case_ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." )
UpperCAmelCase_ = parser.parse_args()
sanity_checks(args )
# ARGS #
init_gpu_params(args )
set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.student_type]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase_ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase_ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase_ = tokenizer.all_special_tokens.index(tok_symbol )
UpperCAmelCase_ = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase_ = special_tok_ids
UpperCAmelCase_ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(fp )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(fp )
UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase_ = 0.0 # do not predict special tokens
UpperCAmelCase_ = torch.from_numpy(snake_case_ )
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info("Data loader created." )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase_ = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase_ = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
else:
UpperCAmelCase_ = student_model_class(stu_architecture_config )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
UpperCAmelCase_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(student , args )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase_ = Distiller(
params=args , dataset=snake_case_ , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
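# Illustrative command line for this distillation script (the script name and
# all paths are placeholders; the flags mirror the parser defined above):
#
#     python train.py --student_type distilbert \
#         --student_config training_configs/distilbert.json \
#         --teacher_type bert --teacher_name bert-base-uncased \
#         --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_clm 0.0 \
#         --data_file data/binarized_text.pickle \
#         --token_counts data/token_counts.pickle \
#         --dump_path serialization_dir/first_run --force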
| 78 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
SCREAMING_SNAKE_CASE_: Any =25_00_04
SCREAMING_SNAKE_CASE_: int =25_00_20
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = MBartaaTokenizer
a__ : str = MBartaaTokenizerFast
a__ : Union[str, Any] = True
a__ : Dict = True
def _lowercase (self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = MBartaaTokenizer(__a , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = "<s>"
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__a ) , 1054 )
def _lowercase (self : Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def _lowercase (self : int ):
UpperCAmelCase_ = MBartaaTokenizer(__a , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__a )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def _lowercase (self : Tuple ):
# fmt: off
UpperCAmelCase_ = {"input_ids": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def _lowercase (self : List[str] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase_ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = tokenizer_r.save_pretrained(__a )
UpperCAmelCase_ = tokenizer_p.save_pretrained(__a )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
UpperCAmelCase_ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
UpperCAmelCase_ = tokenizer_r.from_pretrained(__a )
UpperCAmelCase_ = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = tokenizer_r.save_pretrained(__a , legacy_format=__a )
UpperCAmelCase_ = tokenizer_p.save_pretrained(__a )
# Checks it saves with the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
UpperCAmelCase_ = tokenizer_r.from_pretrained(__a )
UpperCAmelCase_ = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = tokenizer_r.save_pretrained(__a , legacy_format=__a )
UpperCAmelCase_ = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase_ = tokenizer_r.from_pretrained(__a )
UpperCAmelCase_ = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
a__ : Optional[Any] = """facebook/mbart-large-50-one-to-many-mmt"""
a__ : List[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
a__ : Optional[int] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
a__ : Tuple = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def _lowercase (cls : str ):
UpperCAmelCase_ = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
UpperCAmelCase_ = 1
return cls
def _lowercase (self : List[Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250038 )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def _lowercase (self : List[Any] ):
self.assertIn(__a , self.tokenizer.all_special_ids )
UpperCAmelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a )
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def _lowercase (self : Any ):
UpperCAmelCase_ = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __a )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[0] , __a )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__a ) , __a )
def _lowercase (self : Optional[Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250053, 250001] )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
UpperCAmelCase_ = MBartaaTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="pt" )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _lowercase (self : int ):
UpperCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="pt" )
UpperCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="pt" )
UpperCAmelCase_ = targets["input_ids"]
UpperCAmelCase_ = shift_tokens_right(__a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(__a ) , {
# en_XX, A, test, EOS
"input_ids": [[250004, 62, 3034, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
} , )
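# To exercise just this test module (the path is an assumption mirroring the
# usual transformers layout; adjust to the local checkout):
#
#     python -m pytest tests/models/mbart50/test_tokenization_mbart50.py -q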
| 78 | '''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : int = AutoencoderKL
a__ : Optional[Any] = """sample"""
a__ : Union[str, Any] = 1e-2
@property
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(__a )
return {"sample": image}
@property
def _lowercase (self : Any ):
return (3, 32, 32)
@property
def _lowercase (self : Dict ):
return (3, 32, 32)
def _lowercase (self : int ):
UpperCAmelCase_ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def _lowercase (self : int ):
pass
def _lowercase (self : int ):
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _lowercase (self : List[Any] ):
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = self.model_class(**__a )
model.to(__a )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ = model(**__a ).sample
# run the backwards pass on the model. For simplicity, instead of computing a
# real loss we backprop on the mean difference to randomly drawn targets
model.zero_grad()
UpperCAmelCase_ = torch.randn_like(out )
UpperCAmelCase_ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ = self.model_class(**__a )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__a )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ = model_a(**__a ).sample
# run the backwards pass on the model. For simplicity, instead of computing a
# real loss we backprop on the mean difference to randomly drawn targets
model_a.zero_grad()
UpperCAmelCase_ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
UpperCAmelCase_ = dict(model.named_parameters() )
UpperCAmelCase_ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _lowercase (self : Any ):
UpperCAmelCase_ , UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__a )
self.assertIsNotNone(__a )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__a )
UpperCAmelCase_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _lowercase (self : List[str] ):
UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
UpperCAmelCase_ = model.to(__a )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ = torch.manual_seed(0 )
else:
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase_ = image.to(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , sample_posterior=__a , generator=__a ).sample
UpperCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
else:
UpperCAmelCase_ = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) )
@slow
class __A ( unittest.TestCase ):
def _lowercase (self : Dict , __a : Dict , __a : int ):
return f"""gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy"""
def _lowercase (self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Optional[Any] , __a : Optional[Any]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ):
UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a )
return image
def _lowercase (self : List[Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : List[Any]=False ):
UpperCAmelCase_ = "fp16" if fpaa else None
UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa
UpperCAmelCase_ = AutoencoderKL.from_pretrained(
__a , subfolder="vae" , torch_dtype=__a , revision=__a , )
model.to(__a ).eval()
return model
def _lowercase (self : List[Any] , __a : List[Any]=0 ):
if torch_device == "mps":
return torch.manual_seed(__a )
return torch.Generator(device=__a ).manual_seed(__a )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def _lowercase (self : List[Any] , __a : Dict , __a : Optional[int] , __a : List[str] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__a , __a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : Dict , __a : Optional[int] , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , fpaa=__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def _lowercase (self : str , __a : int , __a : Union[str, Any] , __a : List[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__a , __a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : int , __a : int , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : Union[str, Any] , __a : List[str] , __a : Optional[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _lowercase (self : List[str] , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__a , __a , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _lowercase (self : Union[str, Any] , __a : Dict ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__a , __a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
def _lowercase (self : Tuple , __a : List[Any] , __a : List[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model.encode(__a ).latent_dist
UpperCAmelCase_ = dist.sample(generator=__a )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ = torch.tensor(__a )
UpperCAmelCase_ = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(__a , __a , atol=__a )
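# Minimal encode/decode round trip with the VAE exercised above (checkpoint id
# as in `get_sd_vae_model`; tensor shapes match the fixtures used here):
#
#     vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
#     posterior = vae.encode(image).latent_dist        # image: (N, 3, 512, 512)
#     latents = posterior.sample(generator=generator)  # -> (N, 4, 64, 64)
#     reconstruction = vae.decode(latents).sample      # -> (N, 3, 512, 512)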
| 78 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int ) -> bool:
'''simple docstring'''
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | '''simple docstring'''
import logging
from transformers import PretrainedConfig
SCREAMING_SNAKE_CASE_: Any =logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_: Any ={
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """bertabs"""
def __init__(self : Any , __a : int=30522 , __a : Tuple=512 , __a : Tuple=6 , __a : Dict=512 , __a : int=8 , __a : List[Any]=512 , __a : List[str]=0.2 , __a : List[Any]=6 , __a : int=768 , __a : Any=8 , __a : Dict=2048 , __a : Tuple=0.2 , **__a : Optional[int] , ):
super().__init__(**__a )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_pos
UpperCAmelCase_ = enc_layers
UpperCAmelCase_ = enc_hidden_size
UpperCAmelCase_ = enc_heads
UpperCAmelCase_ = enc_ff_size
UpperCAmelCase_ = enc_dropout
UpperCAmelCase_ = dec_layers
UpperCAmelCase_ = dec_hidden_size
UpperCAmelCase_ = dec_heads
UpperCAmelCase_ = dec_ff_size
UpperCAmelCase_ = dec_dropout
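# Construction sketch (the unmangled class is presumably the bertabs
# summarization config, e.g. BertAbsConfig; keyword names follow __init__ above):
#
#     config = BertAbsConfig(vocab_size=30522, enc_layers=6, dec_layers=6)
#     # config.enc_hidden_size, config.dec_hidden_size, ... are then available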
| 78 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class __A ( UpperCamelCase__ ):
a__ : int = """MCTCTFeatureExtractor"""
a__ : int = """AutoTokenizer"""
def __init__(self : Dict , __a : Tuple , __a : Optional[Any] ):
super().__init__(__a , __a )
UpperCAmelCase_ = self.feature_extractor
UpperCAmelCase_ = False
def __call__(self : Dict , *__a : int , **__a : str ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__a , **__a )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
UpperCAmelCase_ = kwargs.pop("raw_speech" )
else:
UpperCAmelCase_ = kwargs.pop("audio" , __a )
UpperCAmelCase_ = kwargs.pop("sampling_rate" , __a )
UpperCAmelCase_ = kwargs.pop("text" , __a )
if len(__a ) > 0:
UpperCAmelCase_ = args[0]
UpperCAmelCase_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
UpperCAmelCase_ = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a )
if text is not None:
UpperCAmelCase_ = self.tokenizer(__a , **__a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase_ = encodings["input_ids"]
return inputs
def _lowercase (self : List[Any] , *__a : List[Any] , **__a : int ):
return self.tokenizer.batch_decode(*__a , **__a )
def _lowercase (self : Optional[int] , *__a : str , **__a : List[Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__a , **__a )
UpperCAmelCase_ = kwargs.pop("input_features" , __a )
UpperCAmelCase_ = kwargs.pop("labels" , __a )
if len(__a ) > 0:
UpperCAmelCase_ = args[0]
UpperCAmelCase_ = args[1:]
if input_features is not None:
UpperCAmelCase_ = self.feature_extractor.pad(__a , *__a , **__a )
if labels is not None:
UpperCAmelCase_ = self.tokenizer.pad(__a , **__a )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
UpperCAmelCase_ = labels["input_ids"]
return input_features
def _lowercase (self : Optional[int] , *__a : List[Any] , **__a : Tuple ):
return self.tokenizer.decode(*__a , **__a )
@contextmanager
def _lowercase (self : Any ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
UpperCAmelCase_ = True
UpperCAmelCase_ = self.tokenizer
yield
UpperCAmelCase_ = self.feature_extractor
UpperCAmelCase_ = False
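# Typical call pattern for the processor above (waveform/transcript are
# placeholders): audio is routed to the feature extractor and text to the
# tokenizer; in the unmangled source the token ids are attached to the
# returned inputs as "labels":
#
#     inputs = processor(audio=waveform, sampling_rate=16_000, text=transcript)
#     labels = inputs["labels"]  # == tokenizer(transcript)["input_ids"]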
| 78 | '''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , )
return config
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = get_config(snake_case_ )
# load original model from timm
UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(snake_case_ )
model.eval()
model.load_state_dict(snake_case_ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = BitImageProcessor(
do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 )
UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case_ , snake_case_ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(snake_case_ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(snake_case_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
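# Usage sketch for a converted checkpoint. "google/bit-50" is an assumed Hub id
# for a BiT model exported by a script like the one above.
from PIL import Image
import requests
from transformers import BitForImageClassification, BitImageProcessor

bit_processor = BitImageProcessor.from_pretrained("google/bit-50")
bit_model = BitForImageClassification.from_pretrained("google/bit-50")
img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
bit_inputs = bit_processor(img, return_tensors="pt")
bit_logits = bit_model(**bit_inputs).logits
print(bit_model.config.id2label[bit_logits.argmax(-1).item()])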
| 78 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: Optional[int] ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: List[str] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Tuple =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE_: Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
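# What the _LazyModule wiring buys, as a sketch: importing the package is cheap,
# and heavy torch-backed submodules are materialized only on first attribute
# access (assuming the upstream transformers lazy-import behavior).
import transformers.models.llama as llama

config_cls = llama.LlamaConfig  # first access triggers the real import of configuration_llama
print(config_cls.model_type)    # "llama"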
| 78 | '''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
UpperCAmelCase_ = 0
def _lowercase (self : Tuple ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__a , __a )
def _lowercase (self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = CLIPConfig()
# Create a dummy config file with image_processor_type
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop("image_processor_type" )
UpperCAmelCase_ = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def _lowercase (self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Tuple ):
with self.assertRaisesRegex(
__a , "clip-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" )
def _lowercase (self : Optional[int] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )
def _lowercase (self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase (self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _lowercase (self : Optional[int] ):
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : Optional[int] ):
class __A ( UpperCamelCase__ ):
a__ : str = True
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# If remote code is not set, the default is to use local
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
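# The registration pattern these tests exercise, as a standalone sketch.
# MyConfig and MyImageProcessor are hypothetical names.
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyImageProcessor(BaseImageProcessor):
    pass

AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)
# AutoImageProcessor can now resolve MyImageProcessor for any checkpoint whose
# config.json declares "model_type": "my-model".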
| 78 | 1 |
'''simple docstring'''
import copy
import re
class __A :
a__ : Optional[int] = """hp"""
a__ : Optional[Any] = {}
a__ : List[Any] = None
@classmethod
def _lowercase (cls : Optional[int] , __a : str , __a : Tuple ):
UpperCAmelCase_ = prefix
UpperCAmelCase_ = defaults
cls.build_naming_info()
@staticmethod
def _lowercase (__a : List[Any] , __a : List[str] ):
if len(__a ) == 0:
return ""
UpperCAmelCase_ = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__a ) + 1 ):
UpperCAmelCase_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__a : Union[str, Any] ):
UpperCAmelCase_ = ""
while integer != 0:
UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
UpperCAmelCase_ = 0
while True:
UpperCAmelCase_ = word + "#" + int_to_alphabetic(__a )
if sword in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = sword
break
UpperCAmelCase_ = short_word
UpperCAmelCase_ = word
return short_word
@staticmethod
def _lowercase (__a : List[str] , __a : Union[str, Any] ):
UpperCAmelCase_ = param_name.split("_" )
UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fall back
# to a separated short name
UpperCAmelCase_ = ["", "_"]
for separator in separators:
UpperCAmelCase_ = separator.join(__a )
if shortname not in info["reverse_short_param"]:
UpperCAmelCase_ = shortname
UpperCAmelCase_ = param_name
return shortname
return param_name
@staticmethod
def _lowercase (__a : int , __a : Union[str, Any] ):
UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__a , __a )
UpperCAmelCase_ = short_name
UpperCAmelCase_ = param_name
@classmethod
def _lowercase (cls : Any ):
if cls.NAMING_INFO is not None:
return
UpperCAmelCase_ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
UpperCAmelCase_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__a , __a )
UpperCAmelCase_ = info
@classmethod
def _lowercase (cls : int , __a : Optional[int] ):
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCAmelCase_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k]
if isinstance(__a , __a ):
UpperCAmelCase_ = 1 if v else 0
UpperCAmelCase_ = "" if isinstance(__a , (int, float) ) else "-"
UpperCAmelCase_ = f"""{key}{sep}{v}"""
name.append(__a )
return "_".join(__a )
@classmethod
def _lowercase (cls : Dict , __a : Dict ):
UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCAmelCase_ = []
else:
UpperCAmelCase_ = repr.split("_" )
UpperCAmelCase_ = {}
for value in values:
if "-" in value:
UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" )
else:
UpperCAmelCase_ = re.sub("[0-9.]" , "" , __a )
UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __a ) )
UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k]
UpperCAmelCase_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCAmelCase_ = cls.DEFAULTS[k]
return parameters
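# Round-trip sketch for the short namer. Upstream this class ships as
# TrialShortNamer in transformers.hp_naming (assumed); the subclass name
# RunNamer is hypothetical.
from transformers.hp_naming import TrialShortNamer

class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 3e-4, "num_epochs": 3}

name = RunNamer.shortname({"learning_rate": 1e-4, "num_epochs": 3})
print(name)                       # "run_lr0.0001" -- params equal to a default are omitted
print(RunNamer.parse_repr(name))  # {"learning_rate": 0.0001, "num_epochs": 3}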
| 78 | '''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False
@dataclass
class __A :
a__ : Optional[int] = None
a__ : bool = True
a__ : bool = True
a__ : Optional[str] = None
# Automatically constructed
a__ : ClassVar[str] = "dict"
a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__(self : Optional[Any] ):
return self.pa_type
def _lowercase (self : str , __a : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__a , __a ):
return {"bytes": None, "path": value}
elif isinstance(__a , __a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don't have to make "read file, make bytes" (just use it!)
UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767
UpperCAmelCase_ = BytesIO(bytes() )
sf.write(__a , __a , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split("::" )[-1]
try:
UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(__a , "rb" , use_auth_token=__a ) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(__a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate )
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowercase (self : Dict ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase_ = storage.field("bytes" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase_ = storage.field("path" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__a , self.pa_type )
def _lowercase (self : Dict , __a : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__a : Tuple ):
with xopen(__a , "rb" ) as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
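# Usage sketch for the feature above: casting a path column to Audio makes the
# decode path run lazily on access. The wav path is hypothetical, and decoding
# needs `soundfile`/`librosa` installed.
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]  # decoded (and resampled to 16 kHz) here
print(sample["sampling_rate"], sample["array"].shape)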
| 78 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __A ( UpperCamelCase__ ):
def _lowercase (self : List[Any] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowercase (self : str ):
UpperCAmelCase_ = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(__a )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self._create_example_records()
UpperCAmelCase_ = Dataset.from_list(__a )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(__a ):
self.assertDictEqual(__a , example_records[i] )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self._create_example_records()
UpperCAmelCase_ = Dataset.from_list(__a )
UpperCAmelCase_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def _lowercase (self : Dict ): # checks what happens with missing columns
UpperCAmelCase_ = [{"col_1": 1}, {"col_2": "x"}]
UpperCAmelCase_ = Dataset.from_list(__a )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def _lowercase (self : Dict ): # checks if the type can be inferred from the second record
UpperCAmelCase_ = [{"col_1": []}, {"col_1": [1, 2]}]
UpperCAmelCase_ = Dataset.from_list(__a )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def _lowercase (self : Dict ):
UpperCAmelCase_ = Dataset.from_list([] )
self.assertEqual(len(__a ) , 0 )
self.assertListEqual(dset.column_names , [] )
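# The column-inference rule pinned down above, as a plain usage example: the
# first record fixes the schema and later records are filled with None.
from datasets import Dataset

ds = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(ds.column_names)  # ["col_1"]
print(ds[1])            # {"col_1": None}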
| 78 | '''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" )
UpperCAmelCase_ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ).to(snake_case_ )
return image
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if "visual_encoder" in key:
UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , snake_case_ )
if "blocks" in key:
UpperCAmelCase_ = re.sub(R"blocks" , "layers" , snake_case_ )
if "attn" in key:
UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , snake_case_ )
if "norm1" in key:
UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , snake_case_ )
if "norm2" in key:
UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , snake_case_ )
if "encoder.norm" in key:
UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , snake_case_ )
if "encoder.patch_embed.proj" in key:
UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , snake_case_ )
if "encoder.pos_embed" in key:
UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , snake_case_ )
if "encoder.cls_token" in key:
UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , snake_case_ )
if "self_attn" in key:
UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , snake_case_ )
return key
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any=None ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ )
else:
UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval()
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" )
UpperCAmelCase_ = pt_model.eval()
UpperCAmelCase_ = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
hf_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = 3_84
UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" )
UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids
UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
UpperCAmelCase_ = hf_model.generate(snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase_ = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
vqa_model.eval()
UpperCAmelCase_ = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ )
hf_vqa_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = ["How many dogs are in this image?"]
UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids
UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
itm_model.eval()
UpperCAmelCase_ = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ )
UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"]
UpperCAmelCase_ = tokenizer(
snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case_ )
hf_itm_model.eval()
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
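# Captioning with a converted checkpoint via the public API, as a sketch.
# "Salesforce/blip-image-captioning-base" is the Hub id this script targets (assumed).
import requests
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor

blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
demo = Image.open(requests.get("https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg", stream=True).raw).convert("RGB")
blip_inputs = blip_processor(images=demo, text="a picture of", return_tensors="pt")
caption_ids = blip_model.generate(**blip_inputs)
print(blip_processor.decode(caption_ids[0], skip_special_tokens=True))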
| 78 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : list[int] ) -> float:
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty" )
UpperCAmelCase_ = sum(snake_case_ ) / len(snake_case_ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
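# Cross-check via NumPy (assumed equivalent to the pure-Python version above):
import numpy as np

nums = np.array([1, 2, 3, 4])
print(np.abs(nums - nums.mean()).mean())  # 1.0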
| 78 | '''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0.999 , snake_case_ : Tuple="cosine" , ) -> Optional[Any]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case_ : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case_ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCAmelCase_ = []
for i in range(snake_case_ ):
UpperCAmelCase_ = i / num_diffusion_timesteps
UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) )
return torch.tensor(snake_case_ , dtype=torch.floataa )
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
a__ : Tuple = [e.name for e in KarrasDiffusionSchedulers]
a__ : Optional[Any] = 2
@register_to_config
def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ):
if trained_betas is not None:
UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
UpperCAmelCase_ = 1.0 - self.betas
UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
UpperCAmelCase_ = use_karras_sigmas
def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ):
if schedule_timesteps is None:
UpperCAmelCase_ = self.timesteps
UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ = 1 if len(__a ) > 1 else 0
else:
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
UpperCAmelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self : List[Any] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ):
UpperCAmelCase_ = num_inference_steps
UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ = np.log(__a )
UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a )
UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ = torch.from_numpy(__a )
UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith("mps" ):
# mps does not support float64
UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa )
else:
UpperCAmelCase_ = timesteps.to(device=__a )
# empty dt and derivative
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ = defaultdict(__a )
def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ):
# get log sigma
UpperCAmelCase_ = np.log(__a )
# get distribution
UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCAmelCase_ = low_idx + 1
UpperCAmelCase_ = log_sigmas[low_idx]
UpperCAmelCase_ = log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ = (low - log_sigma) / (low - high)
UpperCAmelCase_ = np.clip(__a , 0 , 1 )
# transform interpolation to time range
UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ = t.reshape(sigma.shape )
return t
def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ):
UpperCAmelCase_ = in_sigmas[-1].item()
UpperCAmelCase_ = in_sigmas[0].item()
UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper
UpperCAmelCase_ = np.linspace(0 , 1 , __a )
UpperCAmelCase_ = sigma_min ** (1 / rho)
UpperCAmelCase_ = sigma_max ** (1 / rho)
UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self : List[str] ):
return self.dt is None
def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
# advance index counter by 1
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase_ = self.sigmas[step_index - 1]
UpperCAmelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ = 0
UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase_ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
UpperCAmelCase_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase_ = derivative
UpperCAmelCase_ = dt
UpperCAmelCase_ = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next
UpperCAmelCase_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase_ = self.dt
UpperCAmelCase_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
UpperCAmelCase_ = self.timesteps.to(original_samples.device )
UpperCAmelCase_ = timesteps.to(original_samples.device )
UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps]
UpperCAmelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ = sigma.unsqueeze(-1 )
UpperCAmelCase_ = original_samples + noise * sigma
return noisy_samples
def __len__(self : str ):
return self.config.num_train_timesteps
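# Driving sketch, assuming this is the public HeunDiscreteScheduler from
# diffusers (class and helper names are obfuscated above). A zero tensor stands
# in for a UNet's noise prediction, so the loop only exercises the scheduler math.
import torch
from diffusers import HeunDiscreteScheduler

sched = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="scaled_linear")
sched.set_timesteps(num_inference_steps=10)
latents = torch.randn(1, 3, 32, 32) * sched.init_noise_sigma
for t in sched.timesteps:  # timesteps repeat so each pair forms one 2nd-order step
    model_input = sched.scale_model_input(latents, t)
    noise_pred = torch.zeros_like(model_input)  # placeholder for a real model call
    latents = sched.step(noise_pred, t, latents).prev_sample
print(latents.shape)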
| 78 | 1 |
'''simple docstring'''
from typing import Any
def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : list , snake_case_ : dict , snake_case_ : dict , snake_case_ : dict , ) -> list:
'''simple docstring'''
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# Creates data structures and fill initial step
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for state in states_space:
UpperCAmelCase_ = observations_space[0]
UpperCAmelCase_ = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
UpperCAmelCase_ = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
UpperCAmelCase_ = observations_space[o]
UpperCAmelCase_ = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
UpperCAmelCase_ = ""
UpperCAmelCase_ = -1
for k_state in states_space:
UpperCAmelCase_ = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
UpperCAmelCase_ = probability
UpperCAmelCase_ = k_state
# Update probabilities and pointers dicts
UpperCAmelCase_ = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
UpperCAmelCase_ = arg_max
# The final observation
UpperCAmelCase_ = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
UpperCAmelCase_ = ""
UpperCAmelCase_ = -1
for k_state in states_space:
UpperCAmelCase_ = probabilities[(k_state, final_observation)]
if probability > max_probability:
UpperCAmelCase_ = probability
UpperCAmelCase_ = k_state
UpperCAmelCase_ = arg_max
# Process pointers backwards
UpperCAmelCase_ = last_state
UpperCAmelCase_ = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
UpperCAmelCase_ = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Any , snake_case_ : Any , snake_case_ : Any , snake_case_ : Any , ) -> None:
'''simple docstring'''
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Any , snake_case_ : Any , snake_case_ : Any , snake_case_ : Any , ) -> None:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Any ) -> None:
'''simple docstring'''
_validate_list(snake_case_ , "observations_space" )
_validate_list(snake_case_ , "states_space" )
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : str ) -> None:
'''simple docstring'''
if not isinstance(_object , snake_case_ ):
UpperCAmelCase_ = f"""{var_name} must be a list"""
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ = f"""{var_name} must be a list of strings"""
raise ValueError(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Any , snake_case_ : Any , ) -> None:
'''simple docstring'''
_validate_dict(snake_case_ , "initial_probabilities" , snake_case_ )
_validate_nested_dict(snake_case_ , "transition_probabilities" )
_validate_nested_dict(snake_case_ , "emission_probabilities" )
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : str ) -> None:
'''simple docstring'''
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : str , snake_case_ : type , snake_case_ : bool = False ) -> None:
'''simple docstring'''
if not isinstance(_object , snake_case_ ):
UpperCAmelCase_ = f"""{var_name} must be a dict"""
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ):
UpperCAmelCase_ = f"""{var_name} all keys must be strings"""
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ):
UpperCAmelCase_ = "nested dictionary " if nested else ""
UpperCAmelCase_ = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
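# Input shapes on the classic two-state weather/health HMM; the entry point's
# name is obfuscated above, so the call is shown as a comment with a
# hypothetical name.
observations = ["normal", "cold", "dizzy"]
states = ["healthy", "fever"]
initial_p = {"healthy": 0.6, "fever": 0.4}
transition_p = {
    "healthy": {"healthy": 0.7, "fever": 0.3},
    "fever": {"healthy": 0.4, "fever": 0.6},
}
emission_p = {
    "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# viterbi(observations, states, initial_p, transition_p, emission_p)
# -> ["healthy", "healthy", "fever"]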
| 78 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( UpperCamelCase__ ):
a__ : List[str] = """Salesforce/blip-image-captioning-base"""
a__ : Optional[Any] = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
a__ : str = """image_captioner"""
a__ : List[str] = AutoModelForVisionaSeq
a__ : int = ["""image"""]
a__ : Optional[Any] = ["""text"""]
def __init__(self : Any , *__a : Dict , **__a : Union[str, Any] ):
requires_backends(self , ["vision"] )
super().__init__(*__a , **__a )
def _lowercase (self : Union[str, Any] , __a : "Image" ):
return self.pre_processor(images=__a , return_tensors="pt" )
def _lowercase (self : List[str] , __a : Dict ):
return self.model.generate(**__a )
def _lowercase (self : int , __a : Optional[Any] ):
return self.pre_processor.batch_decode(__a , skip_special_tokens=__a )[0].strip()
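# A PipelineTool instance is callable; usage sketch assuming the upstream name
# ImageCaptioningTool in transformers.tools.image_captioning and a local file.
from PIL import Image
from transformers.tools.image_captioning import ImageCaptioningTool  # assumed import path

captioner = ImageCaptioningTool()
print(captioner(Image.open("photo.jpg")))  # hypothetical image file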
| 78 | 1 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
SCREAMING_SNAKE_CASE_: Optional[int] =NewType('DataClass', Any)
SCREAMING_SNAKE_CASE_: str =NewType('DataClassType', Any)
def lowerCAmelCase_ ( snake_case_ : int ) -> Any:
'''simple docstring'''
if isinstance(snake_case_ , snake_case_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def lowerCAmelCase_ ( snake_case_ : list ) -> Callable[[str], Any]:
'''simple docstring'''
UpperCAmelCase_ = {str(snake_case_ ): choice for choice in choices}
return lambda snake_case_ : str_to_choice.get(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( *,
snake_case_ : Union[str, List[str]] = None , snake_case_ : str = None , snake_case_ : Any = dataclasses.MISSING , snake_case_ : Callable[[], Any] = dataclasses.MISSING , snake_case_ : dict = None , **snake_case_ : Optional[int] , ) -> dataclasses.Field:
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
UpperCAmelCase_ = {}
if aliases is not None:
UpperCAmelCase_ = aliases
if help is not None:
UpperCAmelCase_ = help
return dataclasses.field(metadata=snake_case_ , default=snake_case_ , default_factory=snake_case_ , **snake_case_ )
class __A ( UpperCamelCase__ ):
a__ : Iterable[DataClassType]
def __init__(self : Optional[Any] , __a : Union[DataClassType, Iterable[DataClassType]] , **__a : List[str] ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
UpperCAmelCase_ = ArgumentDefaultsHelpFormatter
super().__init__(**__a )
if dataclasses.is_dataclass(__a ):
UpperCAmelCase_ = [dataclass_types]
UpperCAmelCase_ = list(__a )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__a )
@staticmethod
def _lowercase (__a : ArgumentParser , __a : dataclasses.Field ):
UpperCAmelCase_ = f"""--{field.name}"""
UpperCAmelCase_ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __a ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
UpperCAmelCase_ = kwargs.pop("aliases" , [] )
if isinstance(__a , __a ):
UpperCAmelCase_ = [aliases]
UpperCAmelCase_ = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(__a , "UnionType" ) and isinstance(__a , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__a ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
f""" Problem encountered in field '{field.name}'.""" )
if type(__a ) not in field.type.__args__:
# filter `str` in Union
UpperCAmelCase_ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCAmelCase_ = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCAmelCase_ = (
field.type.__args__[0] if isinstance(__a , field.type.__args__[1] ) else field.type.__args__[1]
)
UpperCAmelCase_ = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCAmelCase_ = {}
if origin_type is Literal or (isinstance(field.type , __a ) and issubclass(field.type , __a )):
if origin_type is Literal:
UpperCAmelCase_ = field.type.__args__
else:
UpperCAmelCase_ = [x.value for x in field.type]
UpperCAmelCase_ = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
UpperCAmelCase_ = field.default
else:
UpperCAmelCase_ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCAmelCase_ = copy(__a )
# Hack because type=bool in argparse does not behave as we want.
UpperCAmelCase_ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
UpperCAmelCase_ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCAmelCase_ = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCAmelCase_ = "?"
# This is the value that will get picked if we do --field_name (without value)
UpperCAmelCase_ = True
elif isclass(__a ) and issubclass(__a , __a ):
UpperCAmelCase_ = field.type.__args__[0]
UpperCAmelCase_ = "+"
if field.default_factory is not dataclasses.MISSING:
UpperCAmelCase_ = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCAmelCase_ = True
else:
UpperCAmelCase_ = field.type
if field.default is not dataclasses.MISSING:
UpperCAmelCase_ = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCAmelCase_ = field.default_factory()
else:
UpperCAmelCase_ = True
parser.add_argument(__a , *__a , **__a )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
UpperCAmelCase_ = False
parser.add_argument(f"""--no_{field.name}""" , action="store_false" , dest=field.name , **__a )
def _lowercase (self : int , __a : DataClassType ):
if hasattr(__a , "_argument_group_name" ):
UpperCAmelCase_ = self.add_argument_group(dtype._argument_group_name )
else:
UpperCAmelCase_ = self
try:
UpperCAmelCase_ = get_type_hints(__a )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__a ):
UpperCAmelCase_ = ".".join(map(__a , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(__a ):
if not field.init:
continue
UpperCAmelCase_ = type_hints[field.name]
self._parse_dataclass_field(__a , __a )
def _lowercase (self : Union[str, Any] , __a : Any=None , __a : Tuple=False , __a : Tuple=True , __a : Union[str, Any]=None , __a : Tuple=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
UpperCAmelCase_ = []
if args_filename:
args_files.append(Path(__a ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
args_file_parser.add_argument(__a , type=__a , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip("-" ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
if len(namespace.__dict__ ) > 0:
# additional namespace.
            outputs.append(namespace )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
    def parse_dict(self , args : Dict[str, Any] , allow_extra_keys : bool = False ):
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}""" )
        return tuple(outputs )
    def parse_json_file(self , json_file : str , allow_extra_keys : bool = False ):
        with open(Path(json_file ) , encoding="utf-8" ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file(self , yaml_file : str , allow_extra_keys : bool = False ):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
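# Hedged usage sketch of the parser above. Upstream these methods are exposed as
# `HfArgumentParser.parse_args_into_dataclasses` / `parse_dict` / `parse_json_file` /
# `parse_yaml_file`; the dataclass below is purely hypothetical.
#
# import dataclasses
#
# @dataclasses.dataclass
# class TrainArgs:
#     do_eval: bool = True          # gains an automatic `--no_do_eval` complement flag
#     learning_rate: float = 5e-5
#
# parser = HfArgumentParser(TrainArgs)
# (train_args,) = parser.parse_args_into_dataclasses(["--no_do_eval"])
# assert train_args.do_eval is False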
| 78 | '''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
    '''simple docstring'''
    shapes = []
    if isinstance(tree , dict ):
        for v in tree.values():
            shapes.extend(_fetch_dims(v ) )
    elif isinstance(tree , (list, tuple) ):
        for t in tree:
            shapes.extend(_fetch_dims(t ) )
    elif isinstance(tree , torch.Tensor ):
        shapes.append(tree.shape )
    else:
        raise ValueError("Not supported" )
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx : int , dims : Tuple[int, ...] ) -> Tuple[int, ...]:
    '''simple docstring'''
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
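# Self-contained sketch of the row-major unraveling the helper above performs:
# peel off the fastest-varying dimension first, then reverse the collected
# coordinates. Kept independent so it can be sanity-checked on its own.
def _unravel_index_sketch(flat_idx: int, dims: tuple) -> tuple:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)  # coordinate along this dimension
        flat_idx //= d            # carry into the next, slower dimension
    return tuple(reversed(idx))

assert _unravel_index_sketch(7, (3, 4)) == (1, 3)  # 7 == 1 * 4 + 3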
@torch.jit.ignore
def _get_minimal_slice_set(start : Sequence[int] , end : Sequence[int] , dims : Sequence[int] , start_edges : Optional[Sequence[bool]] = None , end_edges : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
'''simple docstring'''
    def reduce_edge_list(l : List[bool] ) -> None:
        tally = True
        for i in range(len(l ) ):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges )
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end , dims )]
        reduce_edge_list(end_edges )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
    if len(start ) == 0:
        return [()]
    elif len(start ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    slices = []
    path_list = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start , end ):
        if s == e:
            path_list.append(slice(s , s + 1 ) )
        else:
            break
    path = tuple(path_list )
    divergence_idx = len(path_list )
    # start == end, and we're done
    if divergence_idx == len(start ):
return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t : torch.Tensor , flat_start : int , flat_end : int , no_batch_dims : int ) -> torch.Tensor:
    '''simple docstring'''
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
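# Brute-force reference for what _chunk_slice computes (hedged sketch: the real
# helper above avoids materializing the full flattened view by slicing per
# dimension, which is the whole point; this version only pins down the contract):
def _chunk_slice_reference(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    flat = t.reshape(-1, *t.shape[no_batch_dims:])
    return flat[flat_start:flat_end]

assert _chunk_slice_reference(torch.arange(24).reshape(2, 3, 4), 1, 4, no_batch_dims=2).shape == (3, 4)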
def chunk_layer(layer : Callable , inputs : Dict[str, Any] , chunk_size : int , no_batch_dims : int , low_mem : bool = False , _out : Any = None , _add_into_out : bool = False , ) -> Any:
    '''simple docstring'''
    if not (len(inputs ) > 0):
        raise ValueError("Must provide at least one input" )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )
    def _prep_inputs(t : torch.Tensor ) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    prepped_inputs = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t : torch.Tensor ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks ):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks = tensor_tree_map(select_chunk , prepped_inputs )
        # Run the layer on the chunk
        output_chunk = layer(**chunks )
# Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
# Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict ):
            def assign(da : dict , db : dict ) -> None:
                for k, v in da.items():
                    if isinstance(v , dict ):
                        assign(v , db[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += db[k]
                        else:
                            v[i : i + chunk_size] = db[k]
            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for xa, xb in zip(out , output_chunk ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported" )
i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
return out
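# Reference semantics of the chunked driver above for the simplest case of a
# single tensor input with one batch dimension (hedged sketch; the real function
# also handles dict/tuple trees, multi-dim batches and preallocated outputs):
def _chunked_apply_sketch(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    outs = [fn(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)]
    return torch.cat(outs, dim=0)

_x = torch.randn(10, 8)
assert torch.allclose(_chunked_apply_sketch(torch.relu, _x, chunk_size=4), torch.relu(_x))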
class __A :
    def __init__(self , max_chunk_size : int = 512 , ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None
    def _determine_favorable_chunk_size(self , fn : Callable , args : tuple , min_chunk_size : int ):
        logging.info("Tuning chunk size..." )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size : int ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size )
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates ) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i] )
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self , aca : Iterable , acb : Iterable ):
        consistent = True
        for aa, ab in zip(aca , acb ):
            assert type(aa ) == type(ab )
            if isinstance(aa , (list, tuple) ):
                consistent &= self._compare_arg_caches(aa , ab )
            elif isinstance(aa , dict ):
                aa_items = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
                ab_items = [v for _, v in sorted(ab.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(aa_items , ab_items )
            else:
                consistent &= aa == ab
        return consistent
    def tune_chunk_size(self , representative_fn : Callable , args : tuple , min_chunk_size : int , ) -> int:
        consistent = True
        arg_data = tree_map(lambda t : t.shape if isinstance(t , torch.Tensor ) else t , args , object )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(arg_data )
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data )
        else:
            # No cached data yet, so we have to tune from scratch
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
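# Hedged usage sketch of the tuner above (the class itself carries an obfuscated
# name in this file; upstream it is a ChunkSizeTuner). The binary search doubles
# candidate sizes up to max_chunk_size and only retunes when the cached argument
# shapes/values change:
# tuner = ChunkSizeTuner(max_chunk_size=512)
# chunk_size = tuner.tune_chunk_size(layer_fn, args, min_chunk_size=1)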
| 78 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: Optional[Any] =get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Union[str, Any] = DebertaVaTokenizer
a__ : Any = DebertaVaTokenizerFast
a__ : Union[str, Any] = True
a__ : Tuple = True
def _lowercase (self : Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = DebertaVaTokenizer(__a , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self : int , __a : Optional[Any] ):
UpperCAmelCase_ = "this is a test"
UpperCAmelCase_ = "this is a test"
return input_text, output_text
def _lowercase (self : Tuple ):
UpperCAmelCase_ = "<pad>"
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _lowercase (self : Dict ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(__a ) , 30001 )
def _lowercase (self : List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase (self : Tuple ):
# fmt: off
UpperCAmelCase_ = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _lowercase (self : str ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _lowercase (self : Any ):
pass
def _lowercase (self : List[Any] ):
# fmt: off
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , split_by_punct=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , split_by_punct=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def _lowercase (self : Dict ):
# fmt: off
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def _lowercase (self : Any ):
# fmt: off
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def _lowercase (self : Optional[Any] ):
# fmt: off
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def _lowercase (self : int ):
# fmt: off
UpperCAmelCase_ = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = "This is a test"
UpperCAmelCase_ = [13, 1, 4398, 25, 21, 1289]
UpperCAmelCase_ = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ = DebertaVaTokenizer(__a , keep_accents=__a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , keep_accents=__a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
# fmt: off
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
UpperCAmelCase_ = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = DebertaVaTokenizer(__a )
UpperCAmelCase_ = tokenizer.encode("sequence builders" )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __a )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __a , )
@slow
def _lowercase (self : Union[str, Any] ):
# fmt: off
UpperCAmelCase_ = {"input_ids": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 78 | '''simple docstring'''
import copy
import re
class TrialShortNamer :
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None
@classmethod
    def set_defaults(cls , prefix : str , defaults : dict ):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
cls.build_naming_info()
@staticmethod
    def shortname_for_word(info , word ):
        if len(word ) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word ):
            raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1 , len(word ) + 1 ):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer ):
                s = ""
                while integer != 0:
                    s = chr(ord("A" ) + integer % 10 ) + s
                    integer //= 10
                return s
            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i )
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
return short_word
@staticmethod
    def shortname_for_key(info , param_name ):
        words = param_name.split("_" )
        shortname_parts = [TrialShortNamer.shortname_for_word(info , word ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fall back
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts )
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
@staticmethod
    def add_new_param_name(info , param_name ):
        short_name = TrialShortNamer.shortname_for_key(info , param_name )
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info(cls ):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys() )
        for k in field_keys:
            cls.add_new_param_name(info , k )
        cls.NAMING_INFO = info
@classmethod
    def shortname(cls , params ):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX )]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v , bool ):
                v = 1 if v else 0
            sep = "" if isinstance(v , (int, float) ) else "-"
            e = f"""{key}{sep}{v}"""
            name.append(e )
        return "_".join(name )
@classmethod
    def parse_repr(cls , repr : str ):
        repr = repr[len(cls.PREFIX ) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_" )
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-" )
            else:
                p_k = re.sub("[0-9.]" , "" , value )
                p_v = float(re.sub("[^0-9.]" , "" , value ) )
            p_k = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[p_k] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
return parameters
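# Hedged round-trip sketch for the short namer above (hypothetical subclass;
# the exact abbreviations depend on how the greedy prefix search resolves DEFAULTS):
# class MyNamer(TrialShortNamer):
#     PREFIX = "hp"
#     DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}
# name = MyNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
# # e.g. "hp_lr0.0001" -- batch_size is dropped because it equals its default
# assert MyNamer.parse_repr(name) == {"learning_rate": 0.0001, "batch_size": 8}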
| 78 | 1 |
'''simple docstring'''
def stooge_sort(arr : list ) -> list:
    '''simple docstring'''
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge(arr : list , i : int , h : int ) -> None:
    '''simple docstring'''
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr , i , h - t )
        # Recursively sort the last 2/3 of the elements
        stooge(arr , i + t , h )
        # Recursively sort the first 2/3 of the elements again
        stooge(arr , i , h - t )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(stooge_sort(unsorted))
| 78 | '''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Tuple = ["""pixel_values"""]
    def __init__(self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ):
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad(self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ):
        old_height, old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=data_format )
    def preprocess(self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
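# Worked example of the pad-to-multiple arithmetic in `pad` above (pure Python,
# nothing assumed beyond the formula as written). With pad_size=8 a 10x13 image
# grows to 16x16; note the formula always adds a full extra block, even when a
# side is already an exact multiple of `size`.
_old_height, _old_width, _size = 10, 13, 8
_pad_height = (_old_height // _size + 1) * _size - _old_height  # 6
_pad_width = (_old_width // _size + 1) * _size - _old_width     # 3
assert (_old_height + _pad_height, _old_width + _pad_width) == (16, 16)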
| 78 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( UpperCamelCase__ ):
a__ : List[str] = """Salesforce/blip-image-captioning-base"""
a__ : Optional[Any] = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
a__ : str = """image_captioner"""
a__ : List[str] = AutoModelForVisionaSeq
a__ : int = ["""image"""]
a__ : Optional[Any] = ["""text"""]
    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )
    def encode(self , image : "Image" ):
        return self.pre_processor(images=image , return_tensors="pt" )
    def forward(self , inputs : Dict ):
        return self.model.generate(**inputs )
    def decode(self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
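# Hedged usage sketch (upstream this tool is instantiated and called like a
# function, which chains encode -> forward -> decode as defined above):
# from PIL import Image
# tool = ImageCaptioningTool()  # hypothetical name; the class name above is obfuscated
# caption = tool(Image.open("photo.jpg"))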
| 78 | '''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = 'mid_block.attentions.0.'
sd_mid_atn_prefix = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict ) -> dict:
    '''simple docstring'''
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
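# Worked example of the mapping built above, for one representative key
# (derived from the conversion tables; the i=0, j=0 down block):
# HF Diffusers "down_blocks.0.resnets.0.norm1.weight"
#   -> SD "input_blocks.1.0.in_layers.0.weight"
# The prefix swap comes from unet_conversion_map_layer and "norm1" -> "in_layers.0"
# from unet_conversion_map_resnet; the returned dict is keyed by the SD names.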
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w ):
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict(vae_state_dict ) -> dict:
    '''simple docstring'''
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
                new_state_dict[k] = reshape_weight_for_sd(v )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_vaa(text_enc_dict ) -> dict:
    '''simple docstring'''
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
UpperCAmelCase_ = k[: -len(".q_proj.weight" )]
UpperCAmelCase_ = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
UpperCAmelCase_ = k[: -len(".q_proj.bias" )]
UpperCAmelCase_ = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors )
return new_state_dict
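# Minimal sketch of the q/k/v fusion performed above: three projection matrices
# are concatenated row-wise into a single in_proj weight, ordered q, k, v per
# the code2idx table defined before this function.
_q, _k, _v = (torch.randn(4, 4) for _ in range(3))
assert torch.cat([_q, _k, _v]).shape == (12, 4)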
def convert_text_enc_state_dict(text_enc_dict ):
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )
    args = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    text_enc_path = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='cpu')
    else:
        unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        unet_state_dict = torch.load(unet_path, map_location='cpu')
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='cpu')
    else:
        vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        vae_state_dict = torch.load(vae_path, map_location='cpu')
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='cpu')
    else:
        text_enc_path = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        text_enc_dict = torch.load(text_enc_path, map_location='cpu')
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 78 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x : int ) -> int:
    '''simple docstring'''
    return x + 2
class __A ( unittest.TestCase ):
def _lowercase (self : Any ):
UpperCAmelCase_ = "x = 3"
UpperCAmelCase_ = {}
UpperCAmelCase_ = evaluate(__a , {} , state=__a )
assert result == 3
self.assertDictEqual(__a , {"x": 3} )
UpperCAmelCase_ = "x = y"
UpperCAmelCase_ = {"y": 5}
UpperCAmelCase_ = evaluate(__a , {} , state=__a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__a , {"x": 5, "y": 5} )
def _lowercase (self : Dict ):
UpperCAmelCase_ = "y = add_two(x)"
UpperCAmelCase_ = {"x": 3}
UpperCAmelCase_ = evaluate(__a , {"add_two": add_two} , state=__a )
assert result == 5
self.assertDictEqual(__a , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
UpperCAmelCase_ = evaluate(__a , {} , state=__a )
assert result is None
assert "tried to execute add_two" in out.out
def _lowercase (self : Dict ):
UpperCAmelCase_ = "x = 3"
UpperCAmelCase_ = {}
UpperCAmelCase_ = evaluate(__a , {} , state=__a )
assert result == 3
self.assertDictEqual(__a , {"x": 3} )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = "test_dict = {'x': x, 'y': add_two(x)}"
UpperCAmelCase_ = {"x": 3}
UpperCAmelCase_ = evaluate(__a , {"add_two": add_two} , state=__a )
self.assertDictEqual(__a , {"x": 3, "y": 5} )
self.assertDictEqual(__a , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "x = 3\ny = 5"
UpperCAmelCase_ = {}
UpperCAmelCase_ = evaluate(__a , {} , state=__a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__a , {"x": 3, "y": 5} )
def _lowercase (self : str ):
UpperCAmelCase_ = "text = f'This is x: {x}.'"
UpperCAmelCase_ = {"x": 3}
UpperCAmelCase_ = evaluate(__a , {} , state=__a )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__a , {"x": 3, "text": "This is x: 3."} )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = "if x <= 3:\n y = 2\nelse:\n y = 5"
UpperCAmelCase_ = {"x": 3}
UpperCAmelCase_ = evaluate(__a , {} , state=__a )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__a , {"x": 3, "y": 2} )
UpperCAmelCase_ = {"x": 8}
UpperCAmelCase_ = evaluate(__a , {} , state=__a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__a , {"x": 8, "y": 5} )
def _lowercase (self : Any ):
UpperCAmelCase_ = "test_list = [x, add_two(x)]"
UpperCAmelCase_ = {"x": 3}
UpperCAmelCase_ = evaluate(__a , {"add_two": add_two} , state=__a )
self.assertListEqual(__a , [3, 5] )
self.assertDictEqual(__a , {"x": 3, "test_list": [3, 5]} )
def _lowercase (self : int ):
UpperCAmelCase_ = "y = x"
UpperCAmelCase_ = {"x": 3}
UpperCAmelCase_ = evaluate(__a , {} , state=__a )
assert result == 3
self.assertDictEqual(__a , {"x": 3, "y": 3} )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = "test_list = [x, add_two(x)]\ntest_list[1]"
UpperCAmelCase_ = {"x": 3}
UpperCAmelCase_ = evaluate(__a , {"add_two": add_two} , state=__a )
assert result == 5
self.assertDictEqual(__a , {"x": 3, "test_list": [3, 5]} )
UpperCAmelCase_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
UpperCAmelCase_ = {"x": 3}
UpperCAmelCase_ = evaluate(__a , {"add_two": add_two} , state=__a )
assert result == 5
self.assertDictEqual(__a , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = "x = 0\nfor i in range(3):\n x = i"
UpperCAmelCase_ = {}
UpperCAmelCase_ = evaluate(__a , {"range": range} , state=__a )
assert result == 2
self.assertDictEqual(__a , {"x": 2, "i": 2} )
| 78 | '''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector : ndarray ) -> float:
    '''simple docstring'''
    return np.dot(vector , vector )
class __A :
    def __init__(self , *,
        regularization : float = np.inf , kernel : str = "linear" , gamma : float = 0.0 , ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("gamma must be float or int" )
if not self.gamma > 0:
raise ValueError("gamma must be > 0" )
            self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase_ = f"""Unknown kernel: {kernel}"""
raise ValueError(__a )
    def __linear(self , vectora : ndarray , vectorb : ndarray ):
        return np.dot(vectora , vectorb )
    def __rbf(self , vectora : ndarray , vectorb : ndarray ):
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb ) ) )
    def fit(self , observations : list[ndarray] , classes : ndarray ):
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n ,) = np.shape(classes )
        def to_minimize(candidate : ndarray ) -> float:
            s = 0
            (n ,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict(self , observation : ndarray ):
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
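# Hedged usage sketch of the classifier above, using the fit/predict interface
# defined in the class (the class name itself is obfuscated in this file):
# xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#       np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
# ys = np.asarray([1, 1, -1, -1])
# svc = __A(kernel="linear")
# svc.fit(xs, ys)
# svc.predict(np.asarray([0.0, 1.5]))  # expected: 1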
| 78 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
def _lowercase (self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase (self : Any ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def _lowercase (self : List[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _lowercase (self : int ):
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowercase (self : Optional[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(__a )
@property
def _lowercase (self : Optional[Any] ):
def extract(*__a : Any , **__a : Any ):
class __A :
def __init__(self : Union[str, Any] ):
UpperCAmelCase_ = torch.ones([0] )
def _lowercase (self : Tuple , __a : str ):
self.pixel_values.to(__a )
return self
return Out()
return extract
def _lowercase (self : Tuple ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.dummy_cond_unet
UpperCAmelCase_ = PNDMScheduler(skip_prk_steps=__a )
UpperCAmelCase_ = self.dummy_vae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
UpperCAmelCase_ = 77
UpperCAmelCase_ = self.dummy_image.to(__a )
UpperCAmelCase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ = AltDiffusionImgaImgPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
UpperCAmelCase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a )
UpperCAmelCase_ = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = alt_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__a , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = alt_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__a , return_dict=__a , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _lowercase (self : str ):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure the PNDM scheduler skips the PRK steps
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        image = alt_pipe(
            [prompt] , generator=generator , num_inference_steps=2 , output_type="np" , image=init_image , ).images
        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _lowercase (self : Tuple ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to a resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504) )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type="np" , )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _lowercase (self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def _lowercase (self : Optional[int] ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so compare with a loose max-absolute-error tolerance
        assert np.abs(expected_image - image ).max() < 1E-2
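# A minimal, self-contained sketch of the reproducibility pattern the tests above rely on:
# seeding a fresh torch.Generator per call so that two pipeline invocations draw identical
# latents. The helper name is illustrative and not part of the original test suite.
def _sketch_deterministic_generator() -> None:
    import torch

    generator_a = torch.Generator(device="cpu").manual_seed(0)
    generator_b = torch.Generator(device="cpu").manual_seed(0)
    latents_a = torch.randn((1, 4, 4, 4), generator=generator_a)
    latents_b = torch.randn((1, 4, 4, 4), generator=generator_b)
    # identical seeds on the same device yield identical draws
    assert torch.equal(latents_a, latents_b)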
| 78 | '''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """perceiver"""
    def __init__(self : Optional[int] , num_latents=256 , d_latents=1280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs : Any , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class __A ( UpperCamelCase__ ):
@property
    def inputs(self : Dict ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
    def atol_for_validation(self : Optional[Any] ):
return 1E-4
    def generate_dummy_inputs(self : Union[str, Any] , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , ):
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("input_ids" )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("pixel_values" )
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 78 | 1 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict ):
    '''simple docstring'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=snake_case_ ), SplitInfo(dataset_name="my_dataset" )] )
def test_split_dict_asdict_has_dataset_name(split_info ):
    '''simple docstring'''
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 78 | '''simple docstring'''
import requests
def send_slack_message(message_body: str , slack_url: str ) -> None:
    '''simple docstring'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url , json={"text": message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
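# An optional hardening sketch, not part of the original script (the function name and the
# 10-second timeout are the editor's assumptions): the same POST with an explicit network
# timeout, letting requests raise on HTTP error codes.
def send_slack_message_strict(message_body: str, slack_url: str, timeout: float = 10.0) -> None:
    response = requests.post(
        slack_url, json={"text": message_body}, headers={"Content-Type": "application/json"}, timeout=timeout
    )
    response.raise_for_status()  # raises requests.HTTPError for any 4xx/5xx response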
| 78 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path , openai_config_file , pytorch_dump_folder_path ):
    '''simple docstring'''
    # Construct the model config
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
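# A hedged follow-up sketch: the dump folder produced above can be reloaded through the
# regular transformers API to sanity-check the conversion. The helper name and the printed
# summary are illustrative, not part of the original script.
def _sketch_verify_conversion(pytorch_dump_folder_path: str) -> None:
    reloaded = OpenAIGPTModel.from_pretrained(pytorch_dump_folder_path)
    num_params = sum(p.numel() for p in reloaded.parameters())
    print(f"restored {num_params} parameters")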
| 78 | '''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class __A ( UpperCamelCase__ ):
    def __init__(self : Any , segmentation_model : CLIPSegForImageSegmentation , segmentation_processor : CLIPSegProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNetaDConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = 1
UpperCAmelCase_ = FrozenDict(__a )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = True
UpperCAmelCase_ = FrozenDict(__a )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
    def disable_attention_slicing(self : int ):
        # setting `slice_size` to None disables attention slicing
        self.enable_attention_slicing(None )
def _lowercase (self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase (self : Optional[int] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__(self : Dict , prompt : Union[str, List[str]] , image : Union[torch.FloatTensor, PIL.Image.Image] , text : str , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs : int , ):
        # Segment the image with CLIPSeg, using the text query as the mask prompt
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 78 | 1 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str , slack_url: str ) -> None:
    '''simple docstring'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url , json={"text": message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 78 | '''simple docstring'''
def is_power_of_two(number: int ) -> bool:
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
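# A short illustrative check of the bit trick above: a power of two has exactly one set
# bit, so n & (n - 1) clears it to zero. These examples are the editor's, not part of the
# original module.
def _sketch_power_of_two_examples() -> None:
    for value, expected in [(1, True), (2, True), (3, False), (64, True), (96, False)]:
        assert is_power_of_two(value) is expected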
| 78 | 1 |
'''simple docstring'''
def solution(n: int = 60_08_51_47_51_43 ) -> int:
    '''simple docstring'''
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f"{solution() = }")
| 78 | '''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins(root: TreeNode | None ) -> int:
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The number of nodes should be the same as the number of coins" )
    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
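# A small worked example for distribute_coins, added for illustration: a root holding
# three coins with two empty-handed leaves needs exactly two moves (one coin pushed down
# each edge).
def _sketch_distribute_coins_example() -> None:
    tree = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(tree) == 2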
| 78 | 1 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path ):
    '''simple docstring'''
    flax_params = checkpoints.load_tax_checkpoint(t5x_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params(flax_dict ):
    '''simple docstring'''
    converted_dict = {}
    CONVERSION_MAPPING = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
    DECODER_CONVERSION_MAPPING = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R"layers_(\d+)" , R"layer.\1" , new_key )
                new_key = new_key.replace("encoder" , "encoder.encoder" )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R"layers_(\d+)" , R"layer.\1" , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    '''simple docstring'''
    flax_params = get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = PixaStructForConditionalGeneration(config )
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        processor.image_processor.max_patches = 40_96
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print("Model saved in {}".format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
    args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 78 | '''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
return args.f
def get_results(path ):
    '''simple docstring'''
    results = {}
    path = os.path.join(path , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"""can't find {path}""" )
    return results
def is_cuda_and_apex_available():
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
@classmethod
def _lowercase (cls : Any ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , "default_config.yml" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _lowercase (cls : int ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["perplexity"] , 100 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["perplexity"] , 42 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
        # with so little data, distributed training needs more epochs to reach a score on par with 0/1-GPU runs
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertLess(result["train_loss"] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : int ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"] , 28 )
        self.assertGreaterEqual(result["eval_exact"] , 28 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : str ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[int] ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_rouge1"] , 10 )
        self.assertGreaterEqual(result["eval_rouge2"] , 2 )
        self.assertGreaterEqual(result["eval_rougeL"] , 7 )
        self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : List[str] ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_bleu"] , 30 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "translation_no_trainer" ) ) )
@slow
def _lowercase (self : Dict ):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Any ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base (untrained) model scores about 25% accuracy; training should clearly beat that
        self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "step_1" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "image_classification_no_trainer" ) ) )
| 78 | 1 |
'''simple docstring'''
def abbr(a: str , b: str ) -> bool:
    '''simple docstring'''
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
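# Illustrative cases for the abbreviation DP above: "daBcd" can reach "ABC" (upper-case
# "a", keep "B", upper-case "c", delete the lower-case "d"s), while "dBcd" cannot.
def _sketch_abbreviation_examples() -> None:
    assert abbr("daBcd", "ABC") is True
    assert abbr("dBcd", "ABC") is False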
| 78 | '''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class __A :
    def __init__(self : int , prompt : str = None , choices : list = [] ):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self : Union[str, Any] , index : int , end : str = "" ):
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end )
        else:
            forceWrite(self.choices[index] , end )
    def print_choice(self : Any , index : int ):
        if index == self.position:
            forceWrite(f""" {self.arrow_char} """ )
            self.write_choice(index )
        else:
            forceWrite(f""" {self.choices[index]}""" )
        reset_cursor()
    def move_direction(self : Optional[Any] , direction : Direction , num_spaces : int = 1 ):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def _lowercase (self : Dict ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def _lowercase (self : Any ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def _lowercase (self : Optional[Any] ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def _lowercase (self : str ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__a )] for number in range(10 )] )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = int(chr(self.current_selection ) )
UpperCAmelCase_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __a )
else:
return
else:
return
def _lowercase (self : Optional[Any] , __a : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
UpperCAmelCase_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__a )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
UpperCAmelCase_ = int(builtins.input() )
except ValueError:
UpperCAmelCase_ = default_choice
else:
UpperCAmelCase_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(__a , "\n" )
return choice
| 78 | 1 |
'''simple docstring'''
def catalan(number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        error_message = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(error_message )
    if number < 1:
        error_message = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(error_message )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
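# The first few values produced by the recurrence above, for illustration (the function is
# 1-indexed): 1, 1, 2, 5, 14.
def _sketch_catalan_examples() -> None:
    assert [catalan(i) for i in range(1, 6)] == [1, 1, 2, 5, 14]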
| 78 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 78 | 1 |
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(mat_a: np.ndarray , mat_b: np.ndarray , mat_c: np.ndarray , pseudo_inv: np.ndarray | None = None , ) -> np.ndarray:
    '''simple docstring'''
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        error_message = (
            "Expected the same number of rows for A and B. "
            f"""Instead found A of size {shape_a} and B of size {shape_b}"""
        )
        raise ValueError(error_message )
    if shape_b[1] != shape_c[1]:
        error_message = (
            "Expected the same number of columns for B and C. "
            f"""Instead found B of size {shape_b} and C of size {shape_c}"""
        )
        raise ValueError(error_message )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement." )
    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase ):
    def test_schur_complement(self : Optional[int] ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
    def test_improper_a_b_dimensions(self : str ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(b , c , a )
    def test_improper_b_c_dimensions(self : List[str] ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 78 | '''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCAmelCase_ ( snake_case_ : Any ) -> str:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student , args ):
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student , args ):
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=snake_case_ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
        with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
            json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"""Special tokens {special_tok_ids}""" )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(snake_case_ )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(snake_case_ )
UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase_ = 0.0 # do not predict special tokens
UpperCAmelCase_ = torch.from_numpy(snake_case_ )
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info("Data loader created." )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
    if args.n_gpu > 0:
        student.to(f"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
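# Standalone sketch (illustrative values, not from the original script) of the
# token-count smoothing used in main() above: counts ** -alpha boosts rare
# tokens, and special-token ids are zeroed so they are never chosen for masking.
import numpy as np

demo_counts = np.array([100_000, 1_000, 10, 1])  # hypothetical token frequencies
demo_alpha = 0.7                                 # plays the role of args.mlm_smoothing
demo_weights = np.maximum(demo_counts, 1) ** -demo_alpha  # rare tokens -> larger weights
demo_weights[0] = 0.0                            # pretend index 0 is a special token id
demo_probs = demo_weights / demo_weights.sum()   # masking distribution over the vocab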
| 78 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger =logging.get_logger(__name__)
SPIECE_UNDERLINE ='▁'
VOCAB_FILES_NAMES ={
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP ={
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES ={
'facebook/s2t-small-librispeech-asr': 10_24,
}
MUSTC_LANGS =['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES ={'mustc': MUSTC_LANGS}
class Speech2TextTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"""<lang:{lang}>""" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self ):
        return len(self.encoder )
    @property
    def tgt_lang(self ):
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self , new_tgt_lang ):
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens(self , tgt_lang: str ):
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self , text: str ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self , token: str ):
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token(self , index: int ):
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string(self , tokens: List[str] ):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self , token_ids_a: List[int] , token_ids_b: Optional[List[int]]=None ):
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + [self.eos_token_id]
    def get_special_tokens_mask(self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def get_vocab(self ):
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self , d: Dict ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path: str ) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path , "r" ) as f:
        return json.load(f )
def save_json( data: Any , path: str ) -> None:
    '''simple docstring'''
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
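# Quick usage sketch for the JSON helpers above (throwaway temp path):
import tempfile

with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as tmp:
    demo_path = tmp.name
save_json({"<pad>": 0, "<s>": 1}, demo_path)
assert load_json(demo_path) == {"<pad>": 0, "<s>": 1}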
| 78 | '''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
@property
    def dummy_input(self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
    @property
    def input_shape(self ):
        return (3, 32, 32)
    @property
    def output_shape(self ):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self ):
        pass
    def test_training(self ):
        pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def test_gradient_checkpointing(self ):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict )
        # clone model
        model_2.load_state_dict(model.state_dict() )
        model_2.to(torch_device )
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1E-5 )
        named_params = dict(model.named_parameters() )
        named_params_2 = dict(model_2.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_2[name].grad.data , atol=5E-5 ) )
    def test_from_pretrained_hub(self ):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self ):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        model = model.to(torch_device )
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )
        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
            expected_output_slice = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
else:
            expected_output_slice = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
@slow
class AutoencoderKLIntegrationTests ( unittest.TestCase ):
    def get_file_format(self , seed , shape ):
        return f"""gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy"""
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image(self , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ):
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
    def get_sd_vae_model(self , model_id="CompVis/stable-diffusion-v1-4" , fpaa=False ):
        revision = "fp16" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id , subfolder="vae" , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()
        return model
    def get_generator(self , seed=0 ):
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion(self , seed , expected_slice , expected_slice_mps ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self , seed , expected_slice ):
        model = self.get_sd_vae_model(fpaa=True )
        image = self.get_sd_image(seed , fpaa=True )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion_mode(self , seed , expected_slice , expected_slice_mps ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        with torch.no_grad():
            sample = model(image ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self , seed , expected_slice ):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self , seed , expected_slice ):
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self , seed ):
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_2 , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self , seed ):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_2 , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self , seed , expected_slice ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        tolerance = 3E-3 if torch_device != "mps" else 1E-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
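# Minimal sketch of the slice-comparison pattern these tests rely on: take a
# small corner of the output tensor and compare it against hard-coded
# expectations within a tolerance (tensor values here are synthetic).
demo_sample = torch.arange(24.0).reshape(1, 2, 3, 4)
demo_slice = demo_sample[-1, -2:, :2, -2:].flatten().cpu()
assert torch.allclose(demo_slice, demo_slice.clone(), atol=1e-3)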
| 78 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester :
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict ):
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
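# Standalone illustration (toy ids; assumes TF is available) of how the
# attention mask above is built: positions equal to pad_token_id become 0.
demo_pad_id = 1
demo_ids = tf.constant([[5, 6, 7, demo_pad_id, demo_pad_id]])
demo_mask = tf.cast(tf.math.not_equal(demo_ids, demo_pad_id), tf.int8)
# demo_mask -> [[1, 1, 1, 0, 0]]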
@require_tf
class TFMBartModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
    def setUp(self ):
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest ( unittest.TestCase ):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
    @cached_property
    def tokenizer(self ):
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model(self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected(self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )
    def translate_src_text(self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
@slow
    def test_batch_generation_en_ro(self ):
self._assert_generated_batch_equal_expected()
| 78 | '''simple docstring'''
import logging
from transformers import PretrainedConfig
logger =logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP ={
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig ( PretrainedConfig ):
    model_type = "bertabs"
    def __init__(self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 78 | 1 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A =6378137.0
AXIS_B =6356752.314245
RADIUS =6_378_137
def haversine_distance( lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
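# Example call (illustrative coordinates, roughly San Francisco -> Yosemite);
# the function returns the distance in metres.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.1f} km")  # ~254 km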
| 78 | '''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger =logging.get_logger(__name__)
def get_config( model_name: str ) -> BitConfig:
    '''simple docstring'''
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=10_00 , id2label=id2label , label2id=label2id , )
    return config
def rename_key( name: str ) -> str:
    '''simple docstring'''
    if "stem.conv" in name:
        name = name.replace("stem.conv" , "bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "head.fc" in name:
        name = name.replace("head.fc" , "classifier.1" )
    if name.startswith("norm" ):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
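# Illustration (hypothetical timm keys) of applying rename_key over a state
# dict, mirroring the loop inside convert_bit_checkpoint below:
demo_keys = ["stem.conv.weight", "blocks.0.conv1.weight", "head.fc.weight"]
print([rename_key(k) for k in demo_keys])
# -> ['bit.embedder.convolution.weight', 'bit.encoder.layers.0.conv1.weight', 'classifier.1.weight']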
def prepare_img( ) -> Image.Image:
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name: str , pytorch_dump_folder_path: str , push_to_hub: bool=False ) -> None:
    '''simple docstring'''
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"""Pushing model {model_name} and processor to the hub""" )
        model.push_to_hub(f"""ybelkada/{model_name}""" )
        processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
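# Example invocation (script filename and dump path are hypothetical):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-dump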
| 78 | 1 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester ( unittest.TestCase ):
    def __init__(self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self ):
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self ):
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base" )
            self.assertIsNotNone(model )
    def test_attention_outputs(self ):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs(self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
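    # Minimal standalone sketch of the jit-vs-eager check above, applied to a
    # toy function instead of a model (assumes jax is installed):
    def _demo_jit_round_trip(self ):
        import jax.numpy as jnp

        @jax.jit
        def demo_scaled_sum(x ):
            return (x * 2.0).sum()

        demo_x = jnp.arange(4.0 )
        jitted_out = demo_scaled_sum(demo_x )
        with jax.disable_jit():
            eager_out = demo_scaled_sum(demo_x )
        assert jitted_out.shape == eager_out.shape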
| 78 | '''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest ( unittest.TestCase ):
def _lowercase (self : List[str] ):
UpperCAmelCase_ = 0
def _lowercase (self : Tuple ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__a , __a )
def _lowercase (self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop("image_processor_type" )
UpperCAmelCase_ = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def _lowercase (self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Tuple ):
with self.assertRaisesRegex(
__a , "clip-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" )
def _lowercase (self : Optional[int] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )
def _lowercase (self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase (self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _lowercase (self : Optional[int] ):
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : Optional[int] ):
class __A ( UpperCamelCase__ ):
a__ : str = True
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# If remote code is not set, the default is to use local
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
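# Condensed sketch of the register/save/reload round trip exercised above
# (run in a fresh session; reuses the test fixtures imported at the top):
def _demo_custom_processor_round_trip():
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    with tempfile.TemporaryDirectory() as tmp_dir:
        CustomImageProcessor().save_pretrained(tmp_dir)
        reloaded = AutoImageProcessor.from_pretrained(tmp_dir)
    assert reloaded.__class__.__name__ == "CustomImageProcessor"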
| 78 | 1 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence( array: list[int] ) -> list[int]: # This function is recursive
    '''simple docstring'''
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
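# Worked example for the function above (result traced by hand):
demo_array = [10, 22, 9, 33, 21, 50, 41, 60, 80]
assert longest_subsequence(demo_array) == [10, 22, 33, 41, 60, 80]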
| 78 | '''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False
@dataclass
class Audio :
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
    _type: str = field(default="""Audio""" , init=False , repr=False )
def __call__(self : Optional[Any] ):
return self.pa_type
    def encode_example(self , value: Union[str, bytes, dict] ):
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
        if isinstance(value , str ):
            return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
            return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm" ):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate" ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
                if value.get("bytes" ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"] , dtype=np.int16 ).astype(np.float32 ) / 32767
                else:
                    bytes_value = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.float32 ) / 32767
                buffer = BytesIO(bytes() )
                sf.write(buffer , bytes_value , value["sampling_rate"] , format="wav" )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def decode_example(self , value: dict , token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None ):
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
        path, file = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
        audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::" )[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path , "rb" , use_auth_token=use_auth_token ) as f:
                array, sampling_rate = sf.read(f )
        else:
            array, sampling_rate = sf.read(file )
        array = array.T
        if self.mono:
            array = librosa.to_mono(array )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
    def cast_storage(self , storage: Union[pa.StringArray, pa.StructArray] ):
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                bytes_array = storage.field("bytes" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                path_array = storage.field("path" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage(self , storage: pa.StructArray ):
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , "rb" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
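    # Standalone sketch: encoding an in-memory waveform with the feature above
    # (illustrative values; needs the optional `soundfile` dependency):
    #
    #   feature = Audio()
    #   example = {"array": np.zeros(1_600, dtype=np.float32), "sampling_rate": 16_000}
    #   encoded = feature.encode_example(example)  # -> {"bytes": b"RIFF...", "path": None}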
| 78 | 1 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB =get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self , tokenizer ):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 2000 )
    def test_vocab_size(self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
    def test_full_tokenizer(self ):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [465, 287, 265, 631, 842] )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
            tokens , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
            ids , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
# fmt: off
self.assertListEqual(
            back_tokens , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
    def test_fast_encode_decode(self ):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts , expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts , expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
@slow
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
UpperCAmelCase_ = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__a , )
| 78 | '''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" )
UpperCAmelCase_ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ).to(snake_case_ )
return image
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if "visual_encoder" in key:
UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , snake_case_ )
if "blocks" in key:
UpperCAmelCase_ = re.sub(R"blocks" , "layers" , snake_case_ )
if "attn" in key:
UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , snake_case_ )
if "norm1" in key:
UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , snake_case_ )
if "norm2" in key:
UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , snake_case_ )
if "encoder.norm" in key:
UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , snake_case_ )
if "encoder.patch_embed.proj" in key:
UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , snake_case_ )
if "encoder.pos_embed" in key:
UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , snake_case_ )
if "encoder.cls_token" in key:
UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , snake_case_ )
if "self_attn" in key:
UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , snake_case_ )
return key
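# Illustrative trace (added for clarity, not in the original script): in the
# unmangled original each re.sub reassigns `key`, so the renames chain. For
# example, a BLIP ViT weight name such as
#   "visual_encoder.blocks.0.attn.qkv.weight"
# is rewritten step by step into
#   "vision_model.encoder.blocks.0.attn.qkv.weight"        (visual_encoder)
#   "vision_model.encoder.layers.0.attn.qkv.weight"        (blocks -> layers)
#   "vision_model.encoder.layers.0.self_attn.qkv.weight"   (attn -> self_attn)
# which matches the naming scheme of the HF BLIP vision encoder.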
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any=None ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ )
else:
UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval()
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" )
UpperCAmelCase_ = pt_model.eval()
UpperCAmelCase_ = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
hf_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = 3_84
UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" )
UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids
UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
UpperCAmelCase_ = hf_model.generate(snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase_ = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
vqa_model.eval()
UpperCAmelCase_ = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ )
hf_vqa_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = ["How many dogs are in this image?"]
UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids
UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
itm_model.eval()
UpperCAmelCase_ = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ )
UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"]
UpperCAmelCase_ = tokenizer(
snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case_ )
hf_itm_model.eval()
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 78 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
def __init__(self : str , __a : Optional[int] , __a : List[str]=13 , __a : Tuple=32 , __a : Optional[int]=2 , __a : Any=3 , __a : Union[str, Any]=16 , __a : Tuple=[32, 64, 128] , __a : Tuple=[1, 2, 1] , __a : Any=[2, 2, 4] , __a : int=2 , __a : Tuple=2.0 , __a : Dict=True , __a : List[str]=0.0 , __a : List[str]=0.0 , __a : int=0.1 , __a : List[Any]="gelu" , __a : Optional[int]=False , __a : Union[str, Any]=True , __a : Optional[Any]=0.02 , __a : Tuple=1E-5 , __a : Tuple=True , __a : List[Any]=None , __a : List[str]=True , __a : int=10 , __a : Optional[int]=8 , __a : List[Any]=["stage1", "stage2"] , __a : Dict=[1, 2] , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = patch_norm
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = is_training
UpperCAmelCase_ = scope
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = encoder_stride
UpperCAmelCase_ = out_features
UpperCAmelCase_ = out_indices
def _lowercase (self : Tuple ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def _lowercase (self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowercase (self : Dict , __a : str , __a : int , __a : Union[str, Any] ):
UpperCAmelCase_ = FocalNetModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a )
UpperCAmelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
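# Worked example (added for clarity, using this tester's defaults): image_size
# 32 with patch_size 2 yields 16 * 16 = 256 patches; each of the
# len(depths) - 1 = 2 downsampling stages merges 2 x 2 tokens, so the expected
# sequence length is 256 // 4**2 = 16 and the final hidden size is
# embed_dim * 2**2 = 64.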
def _lowercase (self : str , __a : List[str] , __a : Optional[int] , __a : List[str] ):
UpperCAmelCase_ = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCAmelCase_ = None
UpperCAmelCase_ = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase (self : Tuple , __a : Tuple , __a : Optional[int] , __a : int ):
UpperCAmelCase_ = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase (self : Any , __a : int , __a : List[Any] , __a : Tuple ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
a__ : List[str] = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
a__ : Union[str, Any] = False
a__ : str = False
a__ : List[str] = False
a__ : int = False
a__ : Optional[Any] = False
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = FocalNetModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def _lowercase (self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase (self : Optional[Any] ):
return
def _lowercase (self : Any ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def _lowercase (self : str ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def _lowercase (self : Dict ):
pass
def _lowercase (self : Dict ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def _lowercase (self : str ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : Union[str, Any] , __a : Dict , __a : List[str] , __a : Tuple , __a : Optional[int] ):
UpperCAmelCase_ = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(__a , __a ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
UpperCAmelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = reshaped_hidden_states[0].shape
UpperCAmelCase_ = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
self.check_hidden_states_output(__a , __a , __a , __a )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def _lowercase (self : Optional[int] ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(__a )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __A ( unittest.TestCase ):
@cached_property
def _lowercase (self : Optional[int] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def _lowercase (self : Tuple ):
UpperCAmelCase_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__a )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase_ = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**__a )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase_ = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = (FocalNetBackbone,) if is_torch_available() else ()
a__ : int = FocalNetConfig
a__ : List[Any] = False
def _lowercase (self : Dict ):
UpperCAmelCase_ = FocalNetModelTester(self )
| 78 | '''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0.999 , snake_case_ : Tuple="cosine" , ) -> Optional[Any]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case_ : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case_ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCAmelCase_ = []
for i in range(snake_case_ ):
UpperCAmelCase_ = i / num_diffusion_timesteps
UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) )
return torch.tensor(snake_case_ , dtype=torch.floataa )
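# Illustrative sketch (added for clarity; the name `_cosine_betas_sketch` is
# hypothetical, not part of the original file). The function above builds the
# cosine alpha-bar schedule of Nichol & Dhariwal (2021):
#   beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)
# with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2. An unmangled
# equivalent:
def _cosine_betas_sketch(num_steps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]
    return torch.tensor(betas, dtype=torch.float32)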
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
a__ : Tuple = [e.name for e in KarrasDiffusionSchedulers]
a__ : Optional[Any] = 2
@register_to_config
def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ):
if trained_betas is not None:
UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
UpperCAmelCase_ = 1.0 - self.betas
UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
UpperCAmelCase_ = use_karras_sigmas
def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ):
if schedule_timesteps is None:
UpperCAmelCase_ = self.timesteps
UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ = 1 if len(__a ) > 1 else 0
else:
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
UpperCAmelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self : List[Any] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
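# Note (added for clarity, not in the original file): dividing the sample by
# sqrt(sigma**2 + 1) is the c_in input preconditioning used by
# k-diffusion-style samplers (Karras et al., 2022): it keeps the model input
# at roughly unit variance across noise levels, passing the sample through
# unchanged at sigma = 0 and scaling it down by roughly 1/sigma when sigma is
# large.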
def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ):
UpperCAmelCase_ = num_inference_steps
UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ = np.log(__a )
UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a )
UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ = torch.from_numpy(__a )
UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith("mps" ):
# mps does not support float64
UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa )
else:
UpperCAmelCase_ = timesteps.to(device=__a )
# empty dt and derivative
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ = defaultdict(__a )
def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ):
# get log sigma
UpperCAmelCase_ = np.log(__a )
# get distribution
UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCAmelCase_ = low_idx + 1
UpperCAmelCase_ = log_sigmas[low_idx]
UpperCAmelCase_ = log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ = (low - log_sigma) / (low - high)
UpperCAmelCase_ = np.clip(__a , 0 , 1 )
# transform interpolation to time range
UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ = t.reshape(sigma.shape )
return t
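# Worked example (added for clarity; values hypothetical): with
# log_sigmas = log([0.1, 1.0, 10.0]) at timesteps t = 0, 1, 2 and
# sigma = 3.0, log(3.0) falls between log(1.0) and log(10.0); the
# interpolation weight is w = (log 1 - log 3) / (log 1 - log 10) ~= 0.477,
# so the fractional timestep returned is t = (1 - w) * 1 + w * 2 ~= 1.48.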
def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ):
UpperCAmelCase_ = in_sigmas[-1].item()
UpperCAmelCase_ = in_sigmas[0].item()
UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper
UpperCAmelCase_ = np.linspace(0 , 1 , __a )
UpperCAmelCase_ = sigma_min ** (1 / rho)
UpperCAmelCase_ = sigma_max ** (1 / rho)
UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
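# Note (added for clarity): this is the noise schedule from Karras et al.
# (2022), "Elucidating the Design Space of Diffusion-Based Generative Models":
#   sigma_i = (sigma_max**(1/rho) + ramp_i * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho
# with ramp_i = i / (N - 1) and rho = 7, which concentrates sampling steps
# near sigma_min, where small sigma changes matter most for image detail.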
@property
def _lowercase (self : List[str] ):
return self.dt is None
def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
# advance index counter by 1
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase_ = self.sigmas[step_index - 1]
UpperCAmelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ = 0
UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase_ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
UpperCAmelCase_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase_ = derivative
UpperCAmelCase_ = dt
UpperCAmelCase_ = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next
UpperCAmelCase_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase_ = self.dt
UpperCAmelCase_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
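# Note (added for clarity, not in the original file): the two-phase logic
# above is Heun's (trapezoidal) method. The first-order pass takes an Euler
# step x' = x + d1 * dt and stores d1, dt and x; the second pass re-evaluates
# the derivative d2 at x' and replaces the step with x + (d1 + d2) / 2 * dt,
# giving second-order accuracy at the cost of two model evaluations per output
# timestep (hence the repeat_interleave(2) in set_timesteps above).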
def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
UpperCAmelCase_ = self.timesteps.to(original_samples.device )
UpperCAmelCase_ = timesteps.to(original_samples.device )
UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps]
UpperCAmelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ = sigma.unsqueeze(-1 )
UpperCAmelCase_ = original_samples + noise * sigma
return noisy_samples
def __len__(self : str ):
return self.config.num_train_timesteps
| 78 | 1 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
SCREAMING_SNAKE_CASE_: int =['text', 'image', 'audio']
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((5_12, 5_12) ) )
elif input_type == "audio":
inputs.append(torch.ones(30_00 ) )
elif isinstance(snake_case_ , snake_case_ ):
inputs.append(create_inputs(snake_case_ ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def lowerCAmelCase_ ( snake_case_ : List ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = []
for output in outputs:
if isinstance(snake_case_ , (str, AgentText) ):
output_types.append("text" )
elif isinstance(snake_case_ , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(snake_case_ , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class __A :
def _lowercase (self : Optional[Any] ):
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
UpperCAmelCase_ = self.tool.inputs
for _input in inputs:
if isinstance(_input , __a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
UpperCAmelCase_ = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = create_inputs(self.tool.inputs )
UpperCAmelCase_ = self.tool(*__a )
# There is a single output
if len(self.tool.outputs ) == 1:
UpperCAmelCase_ = [outputs]
self.assertListEqual(output_types(__a ) , self.tool.outputs )
def _lowercase (self : str ):
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = create_inputs(self.tool.inputs )
UpperCAmelCase_ = self.tool(*__a )
if not isinstance(__a , __a ):
UpperCAmelCase_ = [outputs]
self.assertEqual(len(__a ) , len(self.tool.outputs ) )
for output, output_type in zip(__a , self.tool.outputs ):
UpperCAmelCase_ = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__a , __a ) )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = create_inputs(self.tool.inputs )
UpperCAmelCase_ = []
for _input, input_type in zip(__a , self.tool.inputs ):
if isinstance(__a , __a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
UpperCAmelCase_ = self.tool(*__a )
if not isinstance(__a , __a ):
UpperCAmelCase_ = [outputs]
self.assertEqual(len(__a ) , len(self.tool.outputs ) )
| 78 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( UpperCamelCase__ ):
a__ : List[str] = """Salesforce/blip-image-captioning-base"""
a__ : Optional[Any] = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
a__ : str = """image_captioner"""
a__ : List[str] = AutoModelForVisionaSeq
a__ : int = ["""image"""]
a__ : Optional[Any] = ["""text"""]
def __init__(self : Any , *__a : Dict , **__a : Union[str, Any] ):
requires_backends(self , ["vision"] )
super().__init__(*__a , **__a )
def _lowercase (self : Union[str, Any] , __a : "Image" ):
return self.pre_processor(images=__a , return_tensors="pt" )
def _lowercase (self : List[str] , __a : Dict ):
return self.model.generate(**__a )
def _lowercase (self : int , __a : Optional[Any] ):
return self.pre_processor.batch_decode(__a , skip_special_tokens=__a )[0].strip()
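# Illustrative usage (added for clarity; hypothetical, not from the original
# file). PipelineTool instances are callable end to end, chaining the three
# methods above (preprocess -> generate -> decode):
#   tool = ImageCaptioningTool()          # class name is mangled to __A here
#   caption = tool(image)                 # `image` is a PIL.Image.Image
#   # -> an English description of the image, as a plain string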
| 78 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Union[str, Any] = ["""pixel_values"""]
def __init__(self : List[Any] , __a : bool = True , __a : int = 32 , __a : Union[str, Any]=PILImageResampling.BILINEAR , __a : bool = True , **__a : Optional[Any] , ):
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = resample
super().__init__(**__a )
def _lowercase (self : Any , __a : np.ndarray , __a : int , __a : Optional[Any] , __a : Optional[ChannelDimension] = None , **__a : Any ):
UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(__a )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCAmelCase_ = height // size_divisor * size_divisor
UpperCAmelCase_ = width // size_divisor * size_divisor
UpperCAmelCase_ = resize(__a , (new_h, new_w) , resample=__a , data_format=__a , **__a )
return image
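# Worked example (added for clarity): with size_divisor = 32, a 333 x 500
# image is resized to 320 x 480 -- each side is rounded *down* to the nearest
# multiple of 32 (333 // 32 * 32 = 320, 500 // 32 * 32 = 480), so stride-32
# feature maps divide evenly with no padding.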
def _lowercase (self : Dict , __a : np.ndarray , __a : float , __a : Optional[ChannelDimension] = None , **__a : Union[str, Any] ):
return rescale(image=__a , scale=__a , data_format=__a , **__a )
def _lowercase (self : Union[str, Any] , __a : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __a : Optional[bool] = None , __a : Optional[int] = None , __a : Union[str, Any]=None , __a : Optional[bool] = None , __a : Optional[Union[TensorType, str]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : Optional[Any] , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = size_divisor if size_divisor is not None else self.size_divisor
UpperCAmelCase_ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
UpperCAmelCase_ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__a ) for img in images]
if do_resize:
UpperCAmelCase_ = [self.resize(__a , size_divisor=__a , resample=__a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(__a , scale=1 / 255 ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
| 78 | '''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowerCAmelCase_ ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
'''simple docstring'''
UpperCAmelCase_ = []
if isinstance(snake_case_ , snake_case_ ):
for v in tree.values():
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]:
'''simple docstring'''
UpperCAmelCase_ = []
for d in reversed(snake_case_ ):
idx.append(flat_idx % d )
UpperCAmelCase_ = flat_idx // d
return tuple(reversed(snake_case_ ) )
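# Worked example (added for clarity): for dims = (2, 3), the row-major flat
# index 5 unpacks as 5 % 3 = 2 (last dim), then 5 // 3 = 1 and 1 % 2 = 1, so
# the collected digits [2, 1] reverse into the index tuple (1, 2).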
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
'''simple docstring'''
def reduce_edge_list(snake_case_ : List[bool] ) -> None:
UpperCAmelCase_ = True
for i in range(len(snake_case_ ) ):
UpperCAmelCase_ = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCAmelCase_ = l[reversed_idx]
if start_edges is None:
UpperCAmelCase_ = [s == 0 for s in start]
reduce_edge_list(snake_case_ )
if end_edges is None:
UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )]
reduce_edge_list(snake_case_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(snake_case_ ) == 0:
return [()]
elif len(snake_case_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(snake_case_ , snake_case_ ):
if s == e:
path_list.append(slice(snake_case_ , s + 1 ) )
else:
break
UpperCAmelCase_ = tuple(snake_case_ )
UpperCAmelCase_ = len(snake_case_ )
# start == end, and we're done
if divergence_idx == len(snake_case_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = start[divergence_idx]
return tuple(
path + (slice(snake_case_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = end[divergence_idx]
return tuple(
path + (slice(snake_case_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
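# Worked example (added for clarity; indices hypothetical): for dims = (2, 3),
# covering start = (0, 1) through end = (1, 1) -- flat indices 1..4 inclusive
# -- produces two slice tuples:
#   (slice(0, 1), slice(1, 3))   -> elements (0, 1), (0, 2)
#   (slice(1, 2), slice(0, 2))   -> elements (1, 0), (1, 1)
# i.e. the minimal set of contiguous slices that exactly tiles the range.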
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor:
'''simple docstring'''
UpperCAmelCase_ = t.shape[:no_batch_dims]
UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) )
# _get_minimal_slice_set is inclusive
UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) )
# Get an ordered list of slices to perform
UpperCAmelCase_ = _get_minimal_slice_set(
snake_case_ , snake_case_ , snake_case_ , )
UpperCAmelCase_ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def lowerCAmelCase_ ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any:
'''simple docstring'''
if not (len(snake_case_ ) > 0):
raise ValueError("Must provide at least one input" )
UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )]
UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] )
def _prep_inputs(snake_case_ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ )
UpperCAmelCase_ = None
if _out is not None:
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCAmelCase_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(snake_case_ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCAmelCase_ = 0
UpperCAmelCase_ = prepped_outputs
for _ in range(snake_case_ ):
# Chunk the input
if not low_mem:
UpperCAmelCase_ = _select_chunk
else:
UpperCAmelCase_ = partial(
_chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , )
UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ )
# Run the layer on the chunk
UpperCAmelCase_ = layer(**snake_case_ )
# Allocate space for the output
if out is None:
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ )
# Put the chunk in its pre-allocated space
if isinstance(snake_case_ , snake_case_ ):
def assign(snake_case_ : dict , snake_case_ : dict ) -> None:
for k, v in da.items():
if isinstance(snake_case_ , snake_case_ ):
assign(snake_case_ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCAmelCase_ = da[k]
assign(snake_case_ , snake_case_ )
elif isinstance(snake_case_ , snake_case_ ):
for xa, xa in zip(snake_case_ , snake_case_ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCAmelCase_ = xa
elif isinstance(snake_case_ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCAmelCase_ = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ )
return out
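# Illustrative usage (added for clarity; names hypothetical, and the function
# above is referred to by its upstream name `chunk_layer`). Chunking trades a
# little bookkeeping for a hard cap on peak activation memory: the batch dims
# are flattened and the layer is run `chunk_size` rows at a time.
#   layer = torch.nn.Linear(8, 8)
#   inputs = {"x": torch.randn(4, 100, 8)}               # two batch dims
#   out = chunk_layer(lambda x: {"y": layer(x)}, inputs,
#                     chunk_size=64, no_batch_dims=2)
#   # out["y"].shape == (4, 100, 8), computed 64 flattened rows at a time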
class __A :
def __init__(self : Dict , __a : int = 512 , ):
UpperCAmelCase_ = max_chunk_size
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def _lowercase (self : List[Any] , __a : Callable , __a : tuple , __a : int ):
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size]
UpperCAmelCase_ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__a : int ) -> bool:
try:
with torch.no_grad():
fn(*__a , chunk_size=__a )
return True
except RuntimeError:
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(__a ) - 1
while i > min_viable_chunk_size_index:
UpperCAmelCase_ = test_chunk_size(candidates[i] )
if not viable:
UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2
else:
UpperCAmelCase_ = i
UpperCAmelCase_ = (i + len(__a ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowercase (self : int , __a : Iterable , __a : Iterable ):
UpperCAmelCase_ = True
for aa, aa in zip(__a , __a ):
assert type(__a ) == type(__a )
if isinstance(__a , (list, tuple) ):
consistent &= self._compare_arg_caches(__a , __a )
elif isinstance(__a , __a ):
UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )]
UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )]
consistent &= self._compare_arg_caches(__a , __a )
else:
consistent &= aa == aa
return consistent
def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = tree_map(lambda __a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__a )
UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a )
else:
# No cached arg data yet, so a tuning run is required
UpperCAmelCase_ = False
if not consistent:
UpperCAmelCase_ = self._determine_favorable_chunk_size(
__a , __a , __a , )
UpperCAmelCase_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 78 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCAmelCase_ ( snake_case_ : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
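# Quick check (added for clarity): every prime p > 3 satisfies p % 6 in
# {1, 5}, because 6k, 6k + 2 and 6k + 4 are even and 6k + 3 is divisible by
# 3. That is why the trial-division loop above only tests i and i + 2 for
# i = 5, 11, 17, ... up to sqrt(number).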
SCREAMING_SNAKE_CASE_: Any =[num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def lowerCAmelCase_ ( snake_case_ : int ) -> list[int]:
'''simple docstring'''
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
UpperCAmelCase_ = []
for num in range(len(snake_case_ ) ):
UpperCAmelCase_ = 0
while 2 * i * i <= odd_composites[num]:
UpperCAmelCase_ = odd_composites[num] - 2 * i * i
if is_prime(snake_case_ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(snake_case_ ) == n:
return list_nums
return []
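# Worked example (added for clarity): 33 is an odd composite and the inner
# loop finds 33 - 2 * 1**2 = 31, which is prime, so 33 satisfies the
# conjecture and is skipped. compute_nums(1) therefore returns the first odd
# composite that cannot be written as prime + 2 * i**2 (Project Euler 46,
# "Goldbach's other conjecture").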
def lowerCAmelCase_ ( ) -> int:
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"{solution() = }")
| 78 | '''simple docstring'''
import copy
import re
class __A :
a__ : Optional[int] = """hp"""
a__ : Optional[Any] = {}
a__ : List[Any] = None
@classmethod
def _lowercase (cls : Optional[int] , __a : str , __a : Tuple ):
UpperCAmelCase_ = prefix
UpperCAmelCase_ = defaults
cls.build_naming_info()
@staticmethod
def _lowercase (__a : List[Any] , __a : List[str] ):
if len(__a ) == 0:
return ""
UpperCAmelCase_ = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__a ) + 1 ):
UpperCAmelCase_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__a : Union[str, Any] ):
UpperCAmelCase_ = ""
while integer != 0:
UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
UpperCAmelCase_ = 0
while True:
UpperCAmelCase_ = word + "#" + int_to_alphabetic(__a )
if sword in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = sword
break
UpperCAmelCase_ = short_word
UpperCAmelCase_ = word
return short_word
@staticmethod
def _lowercase (__a : List[str] , __a : Union[str, Any] ):
UpperCAmelCase_ = param_name.split("_" )
UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fall back
# to a separated short name
UpperCAmelCase_ = ["", "_"]
for separator in separators:
UpperCAmelCase_ = separator.join(__a )
if shortname not in info["reverse_short_param"]:
UpperCAmelCase_ = shortname
UpperCAmelCase_ = param_name
return shortname
return param_name
@staticmethod
def _lowercase (__a : int , __a : Union[str, Any] ):
UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__a , __a )
UpperCAmelCase_ = short_name
UpperCAmelCase_ = param_name
@classmethod
def _lowercase (cls : Any ):
if cls.NAMING_INFO is not None:
return
UpperCAmelCase_ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
UpperCAmelCase_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__a , __a )
UpperCAmelCase_ = info
@classmethod
def _lowercase (cls : int , __a : Optional[int] ):
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCAmelCase_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k]
if isinstance(__a , __a ):
UpperCAmelCase_ = 1 if v else 0
UpperCAmelCase_ = "" if isinstance(__a , (int, float) ) else "-"
UpperCAmelCase_ = f"""{key}{sep}{v}"""
name.append(__a )
return "_".join(__a )
@classmethod
def _lowercase (cls : Dict , __a : Dict ):
UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCAmelCase_ = []
else:
UpperCAmelCase_ = repr.split("_" )
UpperCAmelCase_ = {}
for value in values:
if "-" in value:
UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" )
else:
UpperCAmelCase_ = re.sub("[0-9.]" , "" , __a )
UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __a ) )
UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k]
UpperCAmelCase_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCAmelCase_ = cls.DEFAULTS[k]
return parameters
| 78 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
SCREAMING_SNAKE_CASE_: Optional[int] =None
SCREAMING_SNAKE_CASE_: List[Any] ={
'7B': 1_10_08,
'13B': 1_38_24,
'30B': 1_79_20,
'65B': 2_20_16,
'70B': 2_86_72,
}
SCREAMING_SNAKE_CASE_: Any ={
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int=1 , snake_case_ : Optional[Any]=2_56 ) -> int:
'''simple docstring'''
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
with open(snake_case_ , "r" ) as f:
return json.load(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
with open(snake_case_ , "w" ) as f:
json.dump(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Any=True ) -> int:
'''simple docstring'''
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCAmelCase_ = os.path.join(snake_case_ , "tmp" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCAmelCase_ = read_json(os.path.join(snake_case_ , "params.json" ) )
UpperCAmelCase_ = NUM_SHARDS[model_size]
UpperCAmelCase_ = params["n_layers"]
UpperCAmelCase_ = params["n_heads"]
UpperCAmelCase_ = n_heads // num_shards
UpperCAmelCase_ = params["dim"]
UpperCAmelCase_ = dim // n_heads
UpperCAmelCase_ = 1_0000.0
UpperCAmelCase_ = 1.0 / (base ** (torch.arange(0 , snake_case_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
UpperCAmelCase_ = params["n_kv_heads"] # for GQA / MQA
UpperCAmelCase_ = n_heads_per_shard // num_key_value_heads
UpperCAmelCase_ = dim // num_key_value_heads
else: # compatibility with other checkpoints
UpperCAmelCase_ = n_heads
UpperCAmelCase_ = n_heads_per_shard
UpperCAmelCase_ = dim
# permute for sliced rotary
def permute(snake_case_ : List[Any] , snake_case_ : Tuple=n_heads , snake_case_ : Tuple=dim , snake_case_ : List[str]=dim ):
return w.view(snake_case_ , dima // n_heads // 2 , 2 , snake_case_ ).transpose(1 , 2 ).reshape(snake_case_ , snake_case_ )
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCAmelCase_ = torch.load(os.path.join(snake_case_ , "consolidated.00.pth" ) , map_location="cpu" )
else:
# Sharded
UpperCAmelCase_ = [
torch.load(os.path.join(snake_case_ , f"""consolidated.{i:02d}.pth""" ) , map_location="cpu" )
for i in range(snake_case_ )
]
UpperCAmelCase_ = 0
UpperCAmelCase_ = {"weight_map": {}}
for layer_i in range(snake_case_ ):
UpperCAmelCase_ = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase_ = {
f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCAmelCase_ = {
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.attention_norm.weight"""
].clone(),
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
UpperCAmelCase_ = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(snake_case_ , snake_case_ , snake_case_ )
for i in range(snake_case_ )
] , dim=0 , ).reshape(snake_case_ , snake_case_ ) )
UpperCAmelCase_ = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view(
snake_case_ , snake_case_ , snake_case_ )
for i in range(snake_case_ )
] , dim=0 , ).reshape(snake_case_ , snake_case_ ) , snake_case_ , snake_case_ , snake_case_ , )
UpperCAmelCase_ = torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view(
snake_case_ , snake_case_ , snake_case_ )
for i in range(snake_case_ )
] , dim=0 , ).reshape(snake_case_ , snake_case_ )
UpperCAmelCase_ = torch.cat(
[loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(snake_case_ )] , dim=1 )
UpperCAmelCase_ = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(snake_case_ )] , dim=0 )
UpperCAmelCase_ = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(snake_case_ )] , dim=1 )
UpperCAmelCase_ = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(snake_case_ )] , dim=0 )
UpperCAmelCase_ = inv_freq
for k, v in state_dict.items():
UpperCAmelCase_ = filename
param_count += v.numel()
torch.save(snake_case_ , os.path.join(snake_case_ , snake_case_ ) )
UpperCAmelCase_ = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase_ = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
UpperCAmelCase_ = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(snake_case_ )] , dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(snake_case_ )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase_ = filename
param_count += v.numel()
torch.save(snake_case_ , os.path.join(snake_case_ , snake_case_ ) )
# Write configs
UpperCAmelCase_ = {"total_size": param_count * 2}
write_json(snake_case_ , os.path.join(snake_case_ , "pytorch_model.bin.index.json" ) )
UpperCAmelCase_ = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
UpperCAmelCase_ = params["multiple_of"] if "multiple_of" in params else 2_56
UpperCAmelCase_ = LlamaConfig(
hidden_size=snake_case_ , intermediate_size=compute_intermediate_size(snake_case_ , snake_case_ , snake_case_ ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=snake_case_ , )
config.save_pretrained(snake_case_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
UpperCAmelCase_ = LlamaForCausalLM.from_pretrained(snake_case_ , torch_dtype=torch.floataa , low_cpu_mem_usage=snake_case_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
model.save_pretrained(snake_case_ , safe_serialization=snake_case_ )
shutil.rmtree(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
UpperCAmelCase_ = tokenizer_class(snake_case_ )
tokenizer.save_pretrained(snake_case_ )
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , )
parser.add_argument(
"--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , )
parser.add_argument(
"--output_dir" , help="Location to write HF model and tokenizer" , )
parser.add_argument("--safe_serialization" , type=snake_case_ , help="Whether or not to save using `safetensors`." )
UpperCAmelCase_ = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
UpperCAmelCase_ = os.path.join(args.input_dir , "tokenizer.model" )
write_tokenizer(args.output_dir , snake_case_ )
if __name__ == "__main__":
main()
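As a sanity check, the compute_intermediate_size helper in this script reproduces the intermediate-size constants declared at its top when fed the published LLaMA hidden sizes (the hidden sizes 4096/5120/6656/8192 are an outside assumption, not read from this file):

def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    # Same arithmetic as the helper above: 8n/3, scaled, rounded up to a multiple.
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

for name, hidden_size in [("7B", 4096), ("13B", 5120), ("30B", 6656), ("65B", 8192)]:
    print(name, compute_intermediate_size(hidden_size))
# 7B 11008, 13B 13824, 30B 17920, 65B 22016 -- matching the map above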
| 78 | '''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Tuple = ["""pixel_values"""]
def __init__(self : int , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : int = 8 , **__a : int , ):
super().__init__(**__a )
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = pad_size
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : int , __a : Optional[Union[str, ChannelDimension]] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(__a )
UpperCAmelCase_ = (old_height // size + 1) * size - old_height
UpperCAmelCase_ = (old_width // size + 1) * size - old_width
return pad(__a , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__a )
def _lowercase (self : Tuple , __a : ImageInput , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : List[str] , ):
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_pad if do_pad is not None else self.do_pad
UpperCAmelCase_ = pad_size if pad_size is not None else self.pad_size
UpperCAmelCase_ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_pad:
UpperCAmelCase_ = [self.pad(__a , size=__a ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
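Note the padding arithmetic in this processor: (old // size + 1) * size - old always rounds up past the current extent, so an image whose side is already a multiple of pad_size still gains one full extra multiple. A quick check of that behaviour (an observation only, not a fix):

def pad_amount(old, size=8):
    return (old // size + 1) * size - old

print(pad_amount(13))  # 3 -> 13 is padded to 16
print(pad_amount(16))  # 8 -> 16 is padded to 24 even though 16 % 8 == 0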
| 78 | 1 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : List[Any] = WavaVecaPhonemeCTCTokenizer
a__ : int = False
def _lowercase (self : Optional[Any] ):
super().setUp()
UpperCAmelCase_ = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
UpperCAmelCase_ = dict(zip(__a , range(len(__a ) ) ) )
UpperCAmelCase_ = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
def _lowercase (self : List[str] , __a : Dict , __a : str=False , __a : Union[str, Any]=20 , __a : str=5 ):
UpperCAmelCase_ = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__a )) for i in range(len(__a ) )]
UpperCAmelCase_ = list(filter(lambda __a : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__a ) , __a ) )
if max_length is not None and len(__a ) > max_length:
UpperCAmelCase_ = toks[:max_length]
if min_length is not None and len(__a ) < min_length and len(__a ) > 0:
while len(__a ) < min_length:
UpperCAmelCase_ = toks + toks
# toks_str = [t[1] for t in toks]
UpperCAmelCase_ = [t[0] for t in toks]
# Ensure consistency
UpperCAmelCase_ = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
if " " not in output_txt and len(__a ) > 1:
UpperCAmelCase_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__a )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__a )
)
if with_prefix_space:
UpperCAmelCase_ = " " + output_txt
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
return output_txt, output_ids
def _lowercase (self : Optional[Any] , **__a : Optional[int] ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
UpperCAmelCase_ = tokenizer("m xxx ɪ" , do_phonemize=__a ).input_ids
self.assertEqual(__a , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
UpperCAmelCase_ = tokenizer("m aaa ɪ ccc" , do_phonemize=__a ).input_ids
self.assertEqual(__a , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
UpperCAmelCase_ = tokenizer("maɪ c" , do_phonemize=__a ).input_ids
self.assertEqual(__a , [3, 200] ) # mai should be <unk> (=3)
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase_ = "Hello how are you"
UpperCAmelCase_ = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
self.assertEqual(__a , "h ə l oʊ h aʊ ɑːɹ j uː" )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase_ = "Hello how are you"
UpperCAmelCase_ = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(__a ).input_ids , tokenizer(__a , do_phonemize=__a ).input_ids )
def _lowercase (self : int ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase_ = "Hello how are you"
UpperCAmelCase_ = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
UpperCAmelCase_ = tokenizer.decode(tokenizer(__a ).input_ids )
self.assertEqual(__a , __a )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
UpperCAmelCase_ = tokenizer.decode(sample_ids[0] )
UpperCAmelCase_ = tokenizer.batch_decode(__a )
self.assertEqual(__a , batch_tokens[0] )
self.assertEqual(__a , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def _lowercase (self : str ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase_ = "Hello how are you"
UpperCAmelCase_ = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
self.assertEqual(__a , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase_ = "Hello how are you"
UpperCAmelCase_ = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(__a ).input_ids , tokenizer(__a , do_phonemize=__a ).input_ids )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
UpperCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
UpperCAmelCase_ = tokenizer.decode(sample_ids[0] )
UpperCAmelCase_ = tokenizer.batch_decode(__a )
self.assertEqual(__a , batch_tokens[0] )
self.assertEqual(__a , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
UpperCAmelCase_ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__a )
UpperCAmelCase_ = tokenizer.batch_decode(__a , filter_word_delimiter_token=__a )
self.assertEqual(__a , batch_tokens[0] )
self.assertEqual(__a , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def _lowercase (self : str ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase_ = "Hello how are you"
UpperCAmelCase_ = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
UpperCAmelCase_ = tokenizer.decode(tokenizer(__a ).input_ids , filter_word_delimiter_token=__a )
self.assertEqual(__a , __a )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase_ = "Hello how are you"
UpperCAmelCase_ = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
UpperCAmelCase_ = tokenizer.decode(tokenizer(__a ).input_ids , filter_word_delimiter_token=__a )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=__a )
UpperCAmelCase_ = "Hello how are you"
UpperCAmelCase_ = tokenizer(__a , phonemizer_lang="en-us" ).input_ids
UpperCAmelCase_ = tokenizer(__a , phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(__a , __a )
UpperCAmelCase_ = tokenizer.decode(__a )
UpperCAmelCase_ = tokenizer.decode(__a )
self.assertEqual(__a , "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(__a , "ɛ l o h aʊ a ʁ j u" )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase_ = "Hello how Are you"
UpperCAmelCase_ = "hello how are you"
UpperCAmelCase_ = tokenizer(__a ).input_ids
UpperCAmelCase_ = tokenizer(__a ).input_ids
self.assertEqual(__a , __a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
UpperCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
UpperCAmelCase_ = tokenizer.batch_decode(__a )
self.assertEqual(__a , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
def _lowercase (__a : Tuple , __a : Optional[int] ):
UpperCAmelCase_ = [d[key] for d in offsets]
return retrieved_list
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
UpperCAmelCase_ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
UpperCAmelCase_ = tokenizer.decode(__a , output_char_offsets=__a , filter_word_delimiter_token=__a )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(__a , __a ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.get_tokenizer(word_delimiter_token="|" )
def check_list_tuples_equal(__a : str , __a : List[Any] ):
self.assertTrue(isinstance(__a , __a ) )
self.assertTrue(isinstance(outputs_list[0] , __a ) )
# transform list to ModelOutput
UpperCAmelCase_ = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
def recursive_check(__a : Optional[Any] , __a : Optional[Any] ):
if isinstance(__a , __a ):
[recursive_check(__a , __a ) for la, la in zip(__a , __a )]
self.assertEqual(__a , __a )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
# fmt: off
UpperCAmelCase_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
UpperCAmelCase_ = tokenizer.batch_decode(__a , output_char_offsets=__a )
UpperCAmelCase_ = [tokenizer.decode(__a , output_char_offsets=__a ) for ids in sample_ids]
check_list_tuples_equal(__a , __a )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def _lowercase (self : Union[str, Any] ):
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def _lowercase (self : Union[str, Any] ):
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def _lowercase (self : Optional[Any] ):
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def _lowercase (self : Optional[Any] ):
pass
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(__a )
self.assertNotEqual(__a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase_ = ["aaaaa bbbbbb", "cccccccccdddddddd"]
UpperCAmelCase_ = tokenizer.add_tokens(__a )
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(__a )
self.assertNotEqual(__a , 0 )
self.assertEqual(__a , __a )
self.assertEqual(__a , len(__a ) )
self.assertEqual(__a , all_size + len(__a ) )
UpperCAmelCase_ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__a )
self.assertGreaterEqual(len(__a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCAmelCase_ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
UpperCAmelCase_ = tokenizer.add_special_tokens(__a )
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(__a )
self.assertNotEqual(__a , 0 )
self.assertEqual(__a , __a )
self.assertEqual(__a , len(__a ) )
self.assertEqual(__a , all_size_a + len(__a ) )
UpperCAmelCase_ = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__a )
self.assertGreaterEqual(len(__a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def _lowercase (self : List[str] ):
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def _lowercase (self : int ):
pass
def _lowercase (self : Any ):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
UpperCAmelCase_ = self.get_tokenizers(fast=__a , do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase_ = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(__a )
self.assertIsInstance(output["text"] , __a )
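The offset test above leans on CTC-style decoding: merge runs of repeated ids, drop the pad token, and keep the start/end frame of each surviving run. A minimal sketch of that collapse (simplified; the real tokenizer also handles word delimiters and other special tokens):

def ctc_collapse(ids, pad_id):
    out, i = [], 0
    while i < len(ids):
        j = i
        while j < len(ids) and ids[j] == ids[i]:
            j += 1                      # extend the run of identical ids
        if ids[i] != pad_id:
            out.append({"id": ids[i], "start_offset": i, "end_offset": j})
        i = j
    return out

print(ctc_collapse([11, 5, 5, 0, 5, 8, 8], pad_id=0))
# [{'id': 11, 'start_offset': 0, 'end_offset': 1},
#  {'id': 5, 'start_offset': 1, 'end_offset': 3},
#  {'id': 5, 'start_offset': 4, 'end_offset': 5},
#  {'id': 8, 'start_offset': 5, 'end_offset': 7}]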
| 78 | '''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
SCREAMING_SNAKE_CASE_: Dict =[
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv."
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.'
SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase_ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"encoder.down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: int =f"encoder.down.{i}.block.{j}."
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
SCREAMING_SNAKE_CASE_: int =f"down_blocks.{i}.downsamplers.0."
SCREAMING_SNAKE_CASE_: str =f"down.{i}.downsample."
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[str] =f"up.{3-i}.upsample."
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
SCREAMING_SNAKE_CASE_: List[str] =f"decoder.up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Dict =f"decoder.up.{3-i}.block.{j}."
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
SCREAMING_SNAKE_CASE_: Any =f"mid_block.resnets.{i}."
SCREAMING_SNAKE_CASE_: Tuple =f"mid.block_{i+1}."
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Tuple:
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase_ = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
SCREAMING_SNAKE_CASE_: Dict ={re.escape(x[1]): x[0] for x in textenc_conversion_lst}
SCREAMING_SNAKE_CASE_: str =re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
SCREAMING_SNAKE_CASE_: List[Any] ={'q': 0, 'k': 1, 'v': 2}
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
UpperCAmelCase_ = k[: -len(".q_proj.weight" )]
UpperCAmelCase_ = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
UpperCAmelCase_ = k[: -len(".q_proj.bias" )]
UpperCAmelCase_ = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = torch.cat(snake_case_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = torch.cat(snake_case_ )
return new_state_dict
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
SCREAMING_SNAKE_CASE_: Any =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Dict =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
SCREAMING_SNAKE_CASE_: Union[str, Any] =load_file(unet_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: int =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Dict =torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(vae_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: str =torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(text_enc_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Any =torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
SCREAMING_SNAKE_CASE_: List[Any] =convert_unet_state_dict(unet_state_dict)
SCREAMING_SNAKE_CASE_: Any ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
SCREAMING_SNAKE_CASE_: List[Any] =convert_vae_state_dict(vae_state_dict)
SCREAMING_SNAKE_CASE_: Dict ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
SCREAMING_SNAKE_CASE_: Dict ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
SCREAMING_SNAKE_CASE_: Any ={'transformer.' + k: v for k, v in text_enc_dict.items()}
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict_vaa(text_enc_dict)
SCREAMING_SNAKE_CASE_: int ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict(text_enc_dict)
SCREAMING_SNAKE_CASE_: Optional[int] ={'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
SCREAMING_SNAKE_CASE_: List[str] ={**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
SCREAMING_SNAKE_CASE_: List[str] ={k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
SCREAMING_SNAKE_CASE_: str ={'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
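The only shape surgery in the VAE path above is reshape_weight_for_sd, which turns a Linear (out, in) weight into a 1x1 Conv2d kernel (out, in, 1, 1), the layout the original SD attention blocks expect. A two-line check:

import torch

w = torch.randn(512, 512)                 # a Linear weight from the Diffusers VAE
print(w.reshape(*w.shape, 1, 1).shape)    # torch.Size([512, 512, 1, 1])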
| 78 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : list , snake_case_ : int ) -> int:
'''simple docstring'''
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
UpperCAmelCase_ = [p / w for p, w in zip(snake_case_ , snake_case_ )]
# Creating a copy of the list and sorting profit/weight in ascending order
UpperCAmelCase_ = sorted(snake_case_ )
# declaring useful variables
UpperCAmelCase_ = len(snake_case_ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
UpperCAmelCase_ = sorted_profit_by_weight[length - i - 1]
UpperCAmelCase_ = profit_by_weight.index(snake_case_ )
UpperCAmelCase_ = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
SCREAMING_SNAKE_CASE_: Tuple =[int(x) for x in input('Input profits separated by spaces: ').split()]
SCREAMING_SNAKE_CASE_: Any =[int(x) for x in input('Input weights separated by spaces: ').split()]
SCREAMING_SNAKE_CASE_: Union[str, Any] =int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
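A non-interactive usage sketch, following the call name used in the __main__ block above (the numbers are illustrative): when everything fits, the whole profit is collected; otherwise the remaining capacity takes a fraction of the best leftover item.

assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6          # all 12 kg fit
# 10 kg capacity: the two ratio-2.0 items (5 kg and 4 kg) fit whole, then only
# 1 kg of the 7 kg item fits, contributing 9 * 1/7 of its profit.
assert abs(calc_profit([10, 9, 8], [5, 7, 4], 10) - (10 + 8 + 9 / 7)) < 1e-9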
| 78 | '''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCAmelCase_ ( snake_case_ : ndarray ) -> float:
'''simple docstring'''
return np.dot(snake_case_ , snake_case_ )
class __A :
def __init__(self : int , *,
__a : float = np.inf , __a : str = "linear" , __a : float = 0.0 , ):
UpperCAmelCase_ = regularization
UpperCAmelCase_ = gamma
if kernel == "linear":
UpperCAmelCase_ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("gamma must be float or int" )
if not self.gamma > 0:
raise ValueError("gamma must be > 0" )
UpperCAmelCase_ = self.__rbf
# in the future, there could be a default value like in sklearn
# sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase_ = f"""Unknown kernel: {kernel}"""
raise ValueError(__a )
def _lowercase (self : Optional[int] , __a : ndarray , __a : ndarray ):
return np.dot(__a , __a )
def _lowercase (self : Optional[int] , __a : ndarray , __a : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def _lowercase (self : str , __a : list[ndarray] , __a : ndarray ):
UpperCAmelCase_ = observations
UpperCAmelCase_ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase_) , ) = np.shape(__a )
def to_minimize(__a : ndarray ) -> float:
UpperCAmelCase_ = 0
((UpperCAmelCase_) , ) = np.shape(__a )
for i in range(__a ):
for j in range(__a ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(__a )
UpperCAmelCase_ = LinearConstraint(__a , 0 , 0 )
UpperCAmelCase_ = Bounds(0 , self.regularization )
UpperCAmelCase_ = minimize(
__a , np.ones(__a ) , bounds=__a , constraints=[ly_contraint] ).x
UpperCAmelCase_ = l_star
# calculating mean offset of separation plane to points
UpperCAmelCase_ = 0
for i in range(__a ):
for j in range(__a ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
UpperCAmelCase_ = s / n
def _lowercase (self : Optional[int] , __a : ndarray ):
UpperCAmelCase_ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , __a )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
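The dual described in the comments can be checked by hand in one dimension: with points x = -1 (class -1) and x = +1 (class +1) and a linear kernel, the equality constraint forces l1 = l2 = l, the objective reduces to 2l^2 - 2l, and the minimiser l = 1/2 gives w = 1 and b = 0, i.e. predict sign(x). The same answer, computed the way the class above does it (a sketch with illustrative names, using the same scipy calls):

import numpy as np
from scipy.optimize import Bounds, LinearConstraint, minimize

xs, ys = np.array([-1.0, 1.0]), np.array([-1.0, 1.0])

def dual(l):
    s = sum(l[i] * l[j] * ys[i] * ys[j] * xs[i] * xs[j] for i in range(2) for j in range(2))
    return 0.5 * s - l.sum()

l_star = minimize(dual, np.ones(2), bounds=Bounds(0, np.inf),
                  constraints=[LinearConstraint(ys, 0, 0)]).x
print(np.round(l_star, 3))           # [0.5 0.5]
print((l_star * ys * xs).sum())      # w ~= 1.0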
| 78 | 1 |
'''simple docstring'''
from torch import nn
def lowerCAmelCase_ ( snake_case_ : int ) -> Dict:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
| 78 | '''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """perceiver"""
def __init__(self : Optional[int] , __a : Tuple=256 , __a : Optional[Any]=1280 , __a : Optional[int]=768 , __a : Any=1 , __a : List[str]=26 , __a : Dict=8 , __a : List[Any]=8 , __a : Tuple=None , __a : List[str]=None , __a : Optional[int]="kv" , __a : Union[str, Any]=1 , __a : List[str]=1 , __a : List[Any]="gelu" , __a : List[str]=0.1 , __a : str=0.02 , __a : List[str]=1E-12 , __a : Optional[int]=True , __a : Tuple=262 , __a : Dict=2048 , __a : int=56 , __a : Optional[int]=[368, 496] , __a : Any=16 , __a : Optional[Any]=1920 , __a : Any=16 , __a : str=[1, 16, 224, 224] , **__a : Any , ):
super().__init__(**__a )
UpperCAmelCase_ = num_latents
UpperCAmelCase_ = d_latents
UpperCAmelCase_ = d_model
UpperCAmelCase_ = num_blocks
UpperCAmelCase_ = num_self_attends_per_block
UpperCAmelCase_ = num_self_attention_heads
UpperCAmelCase_ = num_cross_attention_heads
UpperCAmelCase_ = qk_channels
UpperCAmelCase_ = v_channels
UpperCAmelCase_ = cross_attention_shape_for_attention
UpperCAmelCase_ = self_attention_widening_factor
UpperCAmelCase_ = cross_attention_widening_factor
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = use_query_residual
# masked language modeling attributes
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
# image classification attributes
UpperCAmelCase_ = image_size
# flow attributes
UpperCAmelCase_ = train_size
# multimodal autoencoding attributes
UpperCAmelCase_ = num_frames
UpperCAmelCase_ = audio_samples_per_frame
UpperCAmelCase_ = samples_per_patch
UpperCAmelCase_ = output_shape
class __A ( UpperCamelCase__ ):
@property
def _lowercase (self : Dict ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def _lowercase (self : Optional[Any] ):
return 1E-4
def _lowercase (self : Union[str, Any] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__a , __a ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = preprocessor.num_special_tokens_to_add(__a )
UpperCAmelCase_ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [" ".join(["a"] ) * seq_length] * batch_size
UpperCAmelCase_ = dict(preprocessor(__a , return_tensors=__a ) )
UpperCAmelCase_ = inputs.pop("input_ids" )
return inputs
elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase_ = self._generate_dummy_images(__a , __a , __a , __a )
UpperCAmelCase_ = dict(preprocessor(images=__a , return_tensors=__a ) )
UpperCAmelCase_ = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 78 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_: List[Any] ={
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Dict =[
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
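The module above defers every heavy import until an attribute is first touched. A simplified sketch of that lazy-module pattern (the real _LazyModule also handles __dir__, pickling, and error reporting):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._class_to_module = {c: m for m, cs in import_structure.items() for c in cs}

    def __getattr__(self, attr):
        # Import the owning submodule only on first access.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)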
| 78 | '''simple docstring'''
import requests
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) -> None:
'''simple docstring'''
UpperCAmelCase_ = {"Content-Type": "application/json"}
UpperCAmelCase_ = requests.post(snake_case_ , json={"text": message_body} , headers=snake_case_ )
if response.status_code != 2_00:
UpperCAmelCase_ = (
"Request to slack returned an error "
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(snake_case_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
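The error path above can be exercised offline by stubbing requests.post (a test-only sketch; the webhook URL is a placeholder and the call name follows the snippet's own __main__ usage):

from unittest import mock

fake = mock.Mock(status_code=403, text="invalid_token")
with mock.patch("requests.post", return_value=fake):
    try:
        send_slack_message("hi", "https://hooks.slack.com/services/T0/B0/XXX")
    except ValueError as err:
        print(err)   # Request to slack returned an error 403, ...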
| 78 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 78 | '''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name
class __A ( UpperCamelCase__ ):
def __init__(self : Any , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = 1
UpperCAmelCase_ = FrozenDict(__a )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = True
UpperCAmelCase_ = FrozenDict(__a )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , )
def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def _lowercase (self : int ):
self.enable_attention_slicing(__a )
def _lowercase (self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase (self : Optional[int] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__(self : Dict , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : str , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ):
UpperCAmelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
UpperCAmelCase_ = self.segmentation_model(**__a )
UpperCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase_ = self.numpy_to_pil(__a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCAmelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
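# --- Usage sketch (hedged): instantiating the arguments above. The values are
# illustrative; `generation_max_length` and `generation_num_beams` fall back to
# the model configuration when left as None, per the field help strings.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,  # compute generative metrics (ROUGE/BLEU) at eval time
    generation_max_length=128,
    generation_num_beams=4,
)
print(training_args.to_dict()["generation_max_length"])  # 128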
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """
    Return True if `number` is a power of two, using the `n & (n - 1)` bit trick.

    >>> is_power_of_two(0)
    False
    >>> is_power_of_two(1)
    True
    >>> is_power_of_two(8)
    True
    >>> is_power_of_two(6)
    False
    """
    if number < 0:
        raise ValueError("number must not be negative")
    # 0 has no set bits, so it must be handled explicitly.
    return number != 0 and number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
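# Worked example of the `n & (n - 1)` trick used above: subtracting 1 flips the
# lowest set bit and every bit below it, so the AND is zero exactly when at most
# one bit was set.
n = 0b1000           # 8, a power of two
print(bin(n - 1))    # 0b111
print(n & (n - 1))   # 0  -> power of two

m = 0b1010           # 10, not a power of two
print(m & (m - 1))   # 8  -> non-zero, so not a power of two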
'''simple docstring'''
import argparse
import hashlib  # hashlib is only used for the self-test below
import struct


class SHA1Hash:
    def __init__(self, data: bytes):
        self.data = data
        # SHA-1 initialization constants
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        # Pad the message to a multiple of 64 bytes and append the bit length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self) -> list:
        # Split the padded message into 64-byte blocks.
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block: bytes) -> list:
        # Expand a 64-byte block into eighty 32-bit words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    '''simple docstring'''
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
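# Quick self-check of the pure-Python SHA-1 class above against hashlib, using
# the classic test vector (the digest below is the well-known published value).
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert SHA1Hash(message).final_hash() == hashlib.sha1(message).hexdigest()
print(SHA1Hash(message).final_hash())  # 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12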
'''simple docstring'''
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coins(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes should be the same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
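# Worked example for the coin-distribution routine above: the root holds 3 coins
# and both leaves hold 0, so two moves (root->left and root->right) balance the tree.
example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(example_tree))  # 2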
'''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    '''
    Sort `arr` in place with odd-even transposition (brick) sort and return it.

    >>> odd_even_transposition([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    '''
    arr_size = len(arr)
    for phase in range(arr_size):
        # Even phases compare pairs (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        for i in range(phase % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file() -> str:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir) -> dict:
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"""can't find {path}""")
    return results


def is_cuda_and_apex_available() -> bool:
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertLess(result["perplexity"] , 100 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : str ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__a , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_rouge1"] , 10 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_bleu"] , 30 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "translation_no_trainer" ) ) )
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__a , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "image_classification_no_trainer" ) ) )
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_: List[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: int ={'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_: List[Any] ={
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class __A ( UpperCamelCase__ ):
a__ : Optional[Any] = VOCAB_FILES_NAMES
a__ : str = PRETRAINED_VOCAB_FILES_MAP
a__ : int = ["""input_ids""", """attention_mask"""]
a__ : Optional[Any] = None
def __init__(self : Optional[Any] , __a : str=None , __a : Any=None , __a : int=None , __a : Union[str, Any]="<unk>" , __a : Optional[Any]="<s>" , __a : List[str]="</s>" , __a : List[Any]="<pad>" , __a : Any=False , __a : Any=False , **__a : Optional[Any] , ):
super().__init__(
__a , __a , tokenizer_file=__a , unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , add_prefix_space=__a , clean_up_tokenization_spaces=__a , **__a , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __a ) != add_prefix_space:
UpperCAmelCase_ = getattr(__a , pre_tok_state.pop("type" ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**__a )
UpperCAmelCase_ = add_prefix_space
def _lowercase (self : Optional[int] , *__a : Union[str, Any] , **__a : Optional[Any] ):
UpperCAmelCase_ = kwargs.get("is_split_into_words" , __a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs." )
return super()._batch_encode_plus(*__a , **__a )
def _lowercase (self : Tuple , *__a : List[str] , **__a : Dict ):
UpperCAmelCase_ = kwargs.get("is_split_into_words" , __a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs." )
return super()._encode_plus(*__a , **__a )
def _lowercase (self : Optional[int] , __a : str , __a : Optional[str] = None ):
UpperCAmelCase_ = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
def _lowercase (self : Tuple , __a : "Conversation" ):
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] )
if len(__a ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
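# --- Usage sketch: this is the Bloom fast tokenizer; the checkpoint id below
# comes from the pretrained map above.
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
ids = tok("Hello world")["input_ids"]
print(tok.decode(ids))  # "Hello world"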
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
SCREAMING_SNAKE_CASE_: Any =False
try:
SCREAMING_SNAKE_CASE_: Optional[Any] =_is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class __A :
def __init__(self : int , __a : str = None , __a : list = [] ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = choices
UpperCAmelCase_ = prompt
if sys.platform == "win32":
UpperCAmelCase_ = "*"
else:
UpperCAmelCase_ = "➔ "
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __a )
else:
forceWrite(self.choices[index] , __a )
def _lowercase (self : Any , __a : int ):
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(__a )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def _lowercase (self : Optional[Any] , __a : Direction , __a : int = 1 ):
UpperCAmelCase_ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a )
move_cursor(__a , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def _lowercase (self : Dict ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def _lowercase (self : Any ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def _lowercase (self : Optional[Any] ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def _lowercase (self : str ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = int(chr(self.current_selection ) )
UpperCAmelCase_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __a )
else:
return
else:
return
def _lowercase (self : Optional[Any] , __a : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
UpperCAmelCase_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__a )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
UpperCAmelCase_ = int(builtins.input() )
except ValueError:
UpperCAmelCase_ = default_choice
else:
UpperCAmelCase_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(__a , "\n" )
return choice
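# --- Hypothetical usage sketch: the obfuscated class above mirrors accelerate's
# interactive `BulletMenu`; the import path and the `run` entry point are
# assumptions here, not confirmed by the excerpt.
from accelerate.commands.menu.selection_menu import BulletMenu

menu = BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"])
index = menu.run(default_choice=0)  # arrows/digits to move, enter to select
print(f"You picked: {menu.choices[index]}")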
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    '''simple docstring'''

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    '''simple docstring'''
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class __A ( UpperCamelCase__ ):
a__ : TensorFlowBenchmarkArguments
a__ : PretrainedConfig
a__ : str = "TensorFlow"
@property
def _lowercase (self : List[str] ):
return tf.__version__
def _lowercase (self : Optional[Any] , __a : str , __a : int , __a : int ):
# initialize GPU on separate process
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase_ = self._prepare_inference_func(__a , __a , __a )
return self._measure_speed(_inference )
def _lowercase (self : Optional[Any] , __a : str , __a : int , __a : int ):
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase_ = self._prepare_train_func(__a , __a , __a )
return self._measure_speed(_train )
def _lowercase (self : Optional[int] , __a : str , __a : int , __a : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __a )
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase_ = self._prepare_inference_func(__a , __a , __a )
return self._measure_memory(_inference )
def _lowercase (self : Dict , __a : str , __a : int , __a : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __a )
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase_ = self._prepare_train_func(__a , __a , __a )
return self._measure_memory(_train )
def _lowercase (self : Optional[Any] , __a : str , __a : int , __a : int ):
UpperCAmelCase_ = self.config_dict[model_name]
        if self.args.fp16:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase_ = (
hasattr(__a , "architectures" )
and isinstance(config.architectures , __a )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase_ = getattr(__a , __a )
UpperCAmelCase_ = model_cls(__a )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase_ = TF_MODEL_MAPPING[config.__class__](__a )
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(__a , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(__a , __a , __a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__a , decoder_input_ids=__a , training=__a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__a , training=__a )
UpperCAmelCase_ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _lowercase (self : Dict , __a : str , __a : int , __a : int ):
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
        if self.args.fp16:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase_ = (
hasattr(__a , "architectures" )
and isinstance(config.architectures , __a )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase_ = getattr(__a , __a )
UpperCAmelCase_ = model_cls(__a )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase_ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__a )
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(__a , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(__a , __a , __a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase_ = model(__a , decoder_input_ids=__a , labels=__a , training=__a )[0]
UpperCAmelCase_ = tf.gradients(__a , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase_ = model(__a , labels=__a , training=__a )[0]
UpperCAmelCase_ = tf.gradients(__a , model.trainable_variables )
return gradients
UpperCAmelCase_ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _lowercase (self : Dict , __a : str ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 extra times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
                    timeit.repeat(__a , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase_ = timeit.repeat(
__a , repeat=self.args.repeat , number=10 , )
return min(__a ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def _lowercase (self : Dict , __a : Callable[[], None] ):
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase_ = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase_ = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase_ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase_ = nvml.nvmlDeviceGetMemoryInfo(__a )
UpperCAmelCase_ = meminfo.used
UpperCAmelCase_ = Memory(__a )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase_ = None
else:
UpperCAmelCase_ = measure_peak_memory_cpu(__a )
UpperCAmelCase_ = Memory(__a ) if isinstance(__a , __a ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase_ = stop_memory_tracing(__a )
if memory is None:
UpperCAmelCase_ = summary.total
else:
UpperCAmelCase_ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
        'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BeitForImageClassification',
        'BeitForMaskedImageModeling',
        'BeitForSemanticSegmentation',
        'BeitModel',
        'BeitPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
        'FlaxBeitForImageClassification',
        'FlaxBeitForMaskedImageModeling',
        'FlaxBeitModel',
        'FlaxBeitPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
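# A minimal sketch of the lazy-import idea behind `_LazyModule`, using PEP 562
# module-level __getattr__ (simplified; not transformers' actual implementation):
# nothing listed in `_import_structure` is imported until its attribute is accessed.
import importlib

_import_structure = {"modeling_beit": ["BeitModel"]}

def __getattr__(name):
    for module_name, names in _import_structure.items():
        if name in names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")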
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list) -> None:
    '''simple docstring'''
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=snake_case_ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." )
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"""Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite"""
                    " it. Use `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase_ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase_ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase_ = tokenizer.all_special_tokens.index(snake_case_ )
UpperCAmelCase_ = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase_ = special_tok_ids
UpperCAmelCase_ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(snake_case_ )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(snake_case_ )
UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase_ = 0.0 # do not predict special tokens
UpperCAmelCase_ = torch.from_numpy(snake_case_ )
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info("Data loader created." )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase_ = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase_ = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
UpperCAmelCase_ = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
UpperCAmelCase_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase_ = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
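# --- Example invocation (hedged): assembled strictly from the argparse flags
# defined above; the script name and all paths/checkpoints are illustrative.
import subprocess

subprocess.run(
    [
        "python", "train.py",
        "--student_type", "distilbert",
        "--student_config", "training_configs/distilbert-base-uncased.json",
        "--teacher_type", "bert",
        "--teacher_name", "bert-base-uncased",
        "--mlm",
        "--alpha_ce", "5.0",
        "--alpha_mlm", "2.0",
        "--alpha_clm", "0.0",
        "--alpha_cos", "1.0",
        "--token_counts", "data/token_counts.bert-base-uncased.pickle",
        "--data_file", "data/binarized_text.bert-base-uncased.pickle",
        "--dump_path", "serialization_dir/my_first_distillation",
        "--force",
    ],
    check=True,
)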
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False
@dataclass
class __A :
a__ : Optional[int] = None
a__ : bool = True
a__ : bool = True
a__ : Optional[str] = None
# Automatically constructed
a__ : ClassVar[str] = "dict"
a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__(self : Optional[Any] ):
return self.pa_type
def _lowercase (self : str , __a : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__a , __a ):
return {"bytes": None, "path": value}
elif isinstance(__a , __a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767
UpperCAmelCase_ = BytesIO(bytes() )
sf.write(__a , __a , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split("::" )[-1]
try:
UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(__a , "rb" , use_auth_token=__a ) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(__a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate )
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowercase (self : Dict ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase_ = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase_ = storage.field("bytes" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase_ = storage.field("path" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__a , self.pa_type )
def _lowercase (self : Dict , __a : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__a : Tuple ):
with xopen(__a , "rb" ) as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
| 78 | '''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : int = AutoencoderKL
a__ : Optional[Any] = """sample"""
a__ : Union[str, Any] = 1e-2
@property
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(__a )
return {"sample": image}
@property
def _lowercase (self : Any ):
return (3, 32, 32)
@property
def _lowercase (self : Dict ):
return (3, 32, 32)
def _lowercase (self : int ):
UpperCAmelCase_ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def _lowercase (self : int ):
pass
def _lowercase (self : int ):
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _lowercase (self : List[Any] ):
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = self.model_class(**__a )
model.to(__a )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ = model(**__a ).sample
# run the backward pass on the model. For simplicity, we don't compute a real
# loss and instead backprop on the mean difference from random labels
model.zero_grad()
UpperCAmelCase_ = torch.randn_like(__a )
UpperCAmelCase_ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ = self.model_class(**__a )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__a )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ = model_a(**__a ).sample
# run the backward pass on the model. For simplicity, we don't compute a real
# loss and instead backprop on the mean difference from random labels
model_a.zero_grad()
UpperCAmelCase_ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
UpperCAmelCase_ = dict(model.named_parameters() )
UpperCAmelCase_ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _lowercase (self : Any ):
UpperCAmelCase_ , UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__a )
self.assertIsNotNone(__a )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__a )
UpperCAmelCase_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _lowercase (self : List[str] ):
UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
UpperCAmelCase_ = model.to(__a )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ = torch.manual_seed(0 )
else:
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase_ = image.to(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , sample_posterior=__a , generator=__a ).sample
UpperCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
else:
UpperCAmelCase_ = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) )
@slow
class __A ( unittest.TestCase ):
def _lowercase (self : Dict , __a : Dict , __a : int ):
return f"""gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy"""
def _lowercase (self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Optional[Any] , __a : Optional[Any]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ):
UpperCAmelCase_ = torch.float16 if fpaa else torch.float32
UpperCAmelCase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a )
return image
def _lowercase (self : List[Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : List[Any]=False ):
UpperCAmelCase_ = "fp16" if fpaa else None
UpperCAmelCase_ = torch.float16 if fpaa else torch.float32
UpperCAmelCase_ = AutoencoderKL.from_pretrained(
__a , subfolder="vae" , torch_dtype=__a , revision=__a , )
model.to(__a ).eval()
return model
def _lowercase (self : List[Any] , __a : List[Any]=0 ):
if torch_device == "mps":
return torch.manual_seed(__a )
return torch.Generator(device=__a ).manual_seed(__a )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def _lowercase (self : List[Any] , __a : Dict , __a : Optional[int] , __a : List[str] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__a , __a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : Dict , __a : Optional[int] , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , fpaa=__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def _lowercase (self : str , __a : int , __a : Union[str, Any] , __a : List[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__a , __a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : int , __a : int , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : Union[str, Any] , __a : List[str] , __a : Optional[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _lowercase (self : List[str] , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__a , __a , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _lowercase (self : Union[str, Any] , __a : Dict ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__a , __a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
def _lowercase (self : Tuple , __a : List[Any] , __a : List[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model.encode(__a ).latent_dist
UpperCAmelCase_ = dist.sample(generator=__a )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ = torch.tensor(__a )
UpperCAmelCase_ = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(__a , __a , atol=__a )
| 78 | 1 |
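A hedged usage sketch of the encode path in the Audio feature above: serializing an in-memory waveform to the {"bytes", "path"} storage format with soundfile. The helper name is illustrative, not part of the library.

from io import BytesIO

import numpy as np
import soundfile as sf

def encode_waveform(array: np.ndarray, sampling_rate: int) -> dict:
    buffer = BytesIO()
    sf.write(buffer, array, sampling_rate, format="wav")  # in-memory WAV container
    return {"bytes": buffer.getvalue(), "path": None}

example = encode_waveform(np.zeros(16_000, dtype=np.float32), 16_000)
assert example["bytes"].startswith(b"RIFF")  # WAV files begin with the RIFF magic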
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __A ( unittest.TestCase ):
def __init__(self : int , __a : str , __a : Dict=13 , __a : List[Any]=3 , __a : int=224 , __a : Optional[int]=30 , __a : List[str]=400 , __a : int=True , __a : str=None , __a : Dict=True , __a : Dict=[0.5, 0.5, 0.5] , __a : int=[0.5, 0.5, 0.5] , ):
UpperCAmelCase_ = size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def _lowercase (self : List[Any] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : List[str] = ViTImageProcessor if is_vision_available() else None
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = EfficientFormerImageProcessorTester(self )
@property
def _lowercase (self : List[Any] ):
return self.image_proc_tester.prepare_image_processor_dict()
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "image_mean" ) )
self.assertTrue(hasattr(__a , "image_std" ) )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_resize" ) )
self.assertTrue(hasattr(__a , "size" ) )
def _lowercase (self : int ):
pass
def _lowercase (self : int ):
# Initialize image_processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processor(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _lowercase (self : int ):
# Initialize image_processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processor(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _lowercase (self : Dict ):
# Initialize image_processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processor(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 78 | '''simple docstring'''
import logging
from transformers import PretrainedConfig
SCREAMING_SNAKE_CASE_: Any =logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_: Any ={
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """bertabs"""
def __init__(self : Any , __a : int=30522 , __a : Tuple=512 , __a : Tuple=6 , __a : Dict=512 , __a : int=8 , __a : List[Any]=512 , __a : List[str]=0.2 , __a : List[Any]=6 , __a : int=768 , __a : Any=8 , __a : Dict=2048 , __a : Tuple=0.2 , **__a : Optional[int] , ):
super().__init__(**__a )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_pos
UpperCAmelCase_ = enc_layers
UpperCAmelCase_ = enc_hidden_size
UpperCAmelCase_ = enc_heads
UpperCAmelCase_ = enc_ff_size
UpperCAmelCase_ = enc_dropout
UpperCAmelCase_ = dec_layers
UpperCAmelCase_ = dec_hidden_size
UpperCAmelCase_ = dec_heads
UpperCAmelCase_ = dec_ff_size
UpperCAmelCase_ = dec_dropout
| 78 | 1 |
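A hedged sketch of the PretrainedConfig subclassing pattern the summarization config above follows; the class name and defaults here are illustrative only.

from transformers import PretrainedConfig

class TinyAbsConfig(PretrainedConfig):  # hypothetical name
    model_type = "tiny-abs"

    def __init__(self, vocab_size=30522, enc_layers=6, dec_layers=6, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.enc_layers = enc_layers
        self.dec_layers = dec_layers

cfg = TinyAbsConfig(enc_layers=4)
assert cfg.enc_layers == 4 and cfg.vocab_size == 30522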
'''simple docstring'''
from __future__ import annotations
from math import gcd
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int = 2 , snake_case_ : int = 1 , snake_case_ : int = 3 , ) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> int:
return (pow(snake_case_ , 2 ) + step) % modulus
for _ in range(snake_case_ ):
# These track the position within the cycle detection logic.
UpperCAmelCase_ = seed
UpperCAmelCase_ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
UpperCAmelCase_ = rand_fn(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = rand_fn(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = rand_fn(snake_case_ , snake_case_ , snake_case_ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
UpperCAmelCase_ = gcd(hare - tortoise , snake_case_ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is what Richard Brent's "optimized" variant actually does.
UpperCAmelCase_ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
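# A hedged smoke test of the function above, assuming it is callable as
# pollard_rho (the name the __main__ block below uses). With the default
# seed/step the result is deterministic, but either factor (or a failed
# search) is hedged for:
assert pollard_rho(8051) in (83, 97, None)  # 8051 == 83 * 97
assert pollard_rho(13) is None  # a prime input has no nontrivial factor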
if __name__ == "__main__":
import argparse
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args()
SCREAMING_SNAKE_CASE_: List[Any] =pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"{args.num} is probably prime")
else:
SCREAMING_SNAKE_CASE_: str =args.num // divisor
print(f"{args.num} = {divisor} * {quotient}")
| 78 | '''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , )
return config
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = get_config(snake_case_ )
# load original model from timm
UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(key )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(snake_case_ )
model.eval()
model.load_state_dict(snake_case_ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = BitImageProcessor(
do_resize=snake_case_ ,
size={"shortest_edge": timm_transforms[0].size} ,
resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,
do_center_crop=snake_case_ ,
crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,
do_normalize=snake_case_ ,
image_mean=timm_transforms[-1].mean.tolist() ,
image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 )
UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case_ , snake_case_ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(snake_case_ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(snake_case_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 78 | 1 |
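A hedged spot-check of the key-renaming rules in the BiT converter above, written against a hypothetical `rename_key` helper implementing the same replace logic; the expected strings are read off the rules themselves.

def check_rename_rules(rename_key):
    # Translations implied by the replace rules above.
    assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
    assert rename_key("head.fc.bias") == "classifier.1.bias"
    assert rename_key("norm.weight") == "bit.norm.weight"  # "norm*" gains the "bit." prefix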
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __A ( UpperCamelCase__ ):
a__ : List[Any] = ["""vqvae"""]
def __init__(self : List[Any] , __a : AutoencoderKL , __a : UNetaDConditionModel , __a : Mel , __a : Union[DDIMScheduler, DDPMScheduler] , ):
super().__init__()
self.register_modules(unet=__a , scheduler=__a , mel=__a , vqvae=__a )
def _lowercase (self : Optional[int] ):
return 50 if isinstance(self.scheduler , __a ) else 1000
@torch.no_grad()
def __call__(self : Union[str, Any] , __a : int = 1 , __a : str = None , __a : np.ndarray = None , __a : int = 0 , __a : int = 0 , __a : int = None , __a : torch.Generator = None , __a : float = 0 , __a : float = 0 , __a : torch.Generator = None , __a : float = 0 , __a : torch.Tensor = None , __a : torch.Tensor = None , __a : Tuple=True , ):
UpperCAmelCase_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(__a )
UpperCAmelCase_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
UpperCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
UpperCAmelCase_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__a , device=self.device , )
UpperCAmelCase_ = noise
UpperCAmelCase_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__a , __a )
UpperCAmelCase_ = self.mel.audio_slice_to_image(__a )
UpperCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
UpperCAmelCase_ = (input_image / 255) * 2 - 1
UpperCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
UpperCAmelCase_ = self.vqvae.encode(torch.unsqueeze(__a , 0 ) ).latent_dist.sample(
generator=__a )[0]
UpperCAmelCase_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
UpperCAmelCase_ = self.scheduler.add_noise(__a , __a , self.scheduler.timesteps[start_step - 1] )
UpperCAmelCase_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
UpperCAmelCase_ = int(mask_start_secs * pixels_per_second )
UpperCAmelCase_ = int(mask_end_secs * pixels_per_second )
UpperCAmelCase_ = self.scheduler.add_noise(__a , __a , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __a ):
UpperCAmelCase_ = self.unet(__a , __a , __a )["sample"]
else:
UpperCAmelCase_ = self.unet(__a , __a )["sample"]
if isinstance(self.scheduler , __a ):
UpperCAmelCase_ = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , eta=__a , generator=__a , )["prev_sample"]
else:
UpperCAmelCase_ = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , generator=__a , )["prev_sample"]
if mask is not None:
if mask_start > 0:
UpperCAmelCase_ = mask[:, step, :, :mask_start]
if mask_end > 0:
UpperCAmelCase_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
UpperCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images
UpperCAmelCase_ = self.vqvae.decode(__a )["sample"]
UpperCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
UpperCAmelCase_ = (images * 255).round().astype("uint8" )
UpperCAmelCase_ = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )
UpperCAmelCase_ = [self.mel.image_to_audio(__a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__a )[:, np.newaxis, :] ) , **ImagePipelineOutput(__a ) )
@torch.no_grad()
def _lowercase (self : Optional[Any] , __a : List[Image.Image] , __a : int = 50 ):
assert isinstance(self.scheduler , __a )
self.scheduler.set_timesteps(__a )
UpperCAmelCase_ = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
UpperCAmelCase_ = (sample / 255) * 2 - 1
UpperCAmelCase_ = torch.Tensor(__a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
UpperCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
UpperCAmelCase_ = self.scheduler.alphas_cumprod[t]
UpperCAmelCase_ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
UpperCAmelCase_ = 1 - alpha_prod_t
UpperCAmelCase_ = self.unet(__a , __a )["sample"]
UpperCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
UpperCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
UpperCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _lowercase (xa : torch.Tensor , xb : torch.Tensor , alpha : float ):
UpperCAmelCase_ = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
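# A hedged numpy restatement of the spherical interpolation (slerp) above;
# alpha in [0, 1] blends xa into xb along the great circle between them.
# Names here are illustrative.
def slerp_np(xa, xb, alpha):
    import numpy as np
    theta = np.arccos(np.dot(xa.ravel(), xb.ravel()) / (np.linalg.norm(xa) * np.linalg.norm(xb)))
    return (np.sin((1 - alpha) * theta) * xa + np.sin(alpha * theta) * xb) / np.sin(theta)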
| 78 | '''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
UpperCAmelCase_ = 0
def _lowercase (self : Tuple ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__a , __a )
def _lowercase (self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = CLIPConfig()
# Create a dummy config file with image_processor_type
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop("image_processor_type" )
UpperCAmelCase_ = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def _lowercase (self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Tuple ):
with self.assertRaisesRegex(
__a , "clip-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" )
def _lowercase (self : Optional[int] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )
def _lowercase (self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase (self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _lowercase (self : Optional[int] ):
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : Optional[int] ):
class __A ( UpperCamelCase__ ):
a__ : str = True
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# If remote code is not set, the default is to use local
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 78 | 1 |
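A hedged sketch of the register-then-resolve pattern exercised in the tests above. CustomConfig and CustomImageProcessor come from the test helper modules, and the exact resolution behavior may vary across transformers versions.

import tempfile

from transformers import AutoConfig, AutoImageProcessor

AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
image_processor = CustomImageProcessor()  # assumed directly constructible with defaults
with tempfile.TemporaryDirectory() as tmp_dir:
    image_processor.save_pretrained(tmp_dir)
    reloaded = AutoImageProcessor.from_pretrained(tmp_dir)  # resolves via the registry
assert type(reloaded).__name__ == "CustomImageProcessor"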
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: str =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase_ = getattr(snake_case_ , attribute )
if weight_type is not None:
UpperCAmelCase_ = getattr(snake_case_ , snake_case_ ).shape
else:
UpperCAmelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCAmelCase_ = value
elif weight_type == "weight_g":
UpperCAmelCase_ = value
elif weight_type == "weight_v":
UpperCAmelCase_ = value
elif weight_type == "bias":
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = fairseq_model.state_dict()
UpperCAmelCase_ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase_ = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase_ = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase_ = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
UpperCAmelCase_ = True
if "*" in mapped_key:
UpperCAmelCase_ = name.split(key )[0].split("." )[-2]
UpperCAmelCase_ = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
UpperCAmelCase_ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase_ = "weight_v"
elif "weight" in name:
UpperCAmelCase_ = "weight"
elif "bias" in name:
UpperCAmelCase_ = "bias"
else:
UpperCAmelCase_ = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = full_name.split("conv_layers." )[-1]
UpperCAmelCase_ = name.split("." )
UpperCAmelCase_ = int(items[0] )
UpperCAmelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : str=None , snake_case_ : Tuple=None , snake_case_ : Dict=True ) -> Optional[int]:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase_ = HubertConfig.from_pretrained(snake_case_ )
else:
UpperCAmelCase_ = HubertConfig()
if is_finetuned:
if dict_path:
UpperCAmelCase_ = Dictionary.load(snake_case_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase_ = target_dict.pad_index
UpperCAmelCase_ = target_dict.bos_index
UpperCAmelCase_ = target_dict.eos_index
UpperCAmelCase_ = len(target_dict.symbols )
UpperCAmelCase_ = os.path.join(snake_case_ , "vocab.json" )
if not os.path.isdir(snake_case_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(snake_case_ ) )
return
os.makedirs(snake_case_ , exist_ok=snake_case_ )
with open(snake_case_ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , snake_case_ )
UpperCAmelCase_ = WavaVecaCTCTokenizer(
snake_case_ ,
unk_token=target_dict.unk_word ,
pad_token=target_dict.pad_word ,
bos_token=target_dict.bos_word ,
eos_token=target_dict.eos_word ,
word_delimiter_token="|" ,
do_lower_case=snake_case_ , )
UpperCAmelCase_ = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , )
UpperCAmelCase_ = WavaVecaProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ )
processor.save_pretrained(snake_case_ )
UpperCAmelCase_ = HubertForCTC(snake_case_ )
else:
UpperCAmelCase_ = HubertModel(snake_case_ )
if is_finetuned:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCAmelCase_ = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
SCREAMING_SNAKE_CASE_: Optional[Any] =parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 78 |
| 78 | 1 |
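# The audio feature above converts raw 16-bit PCM into a WAV payload by
# scaling samples into [-1.0, 1.0]. A minimal standalone sketch of that
# step (the function name is illustrative; assumes numpy and soundfile):
from io import BytesIO

import numpy as np
import soundfile as sf


def pcm_bytes_to_wav_bytes(pcm: bytes, sampling_rate: int) -> bytes:
    # 16-bit signed PCM covers [-32768, 32767]; dividing by 32767 normalizes
    array = np.frombuffer(pcm, dtype=np.int16).astype(np.float32) / 32767
    buffer = BytesIO()
    sf.write(buffer, array, sampling_rate, format="wav")
    return buffer.getvalue()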
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
SCREAMING_SNAKE_CASE_: int =[
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def lowerCAmelCase_ ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = Github(os.environ["GITHUB_TOKEN"] )
UpperCAmelCase_ = g.get_repo("huggingface/transformers" )
UpperCAmelCase_ = repo.get_issues(state="open" )
for issue in open_issues:
UpperCAmelCase_ = sorted([comment for comment in issue.get_comments()] , key=lambda snake_case_ : i.created_at , reverse=snake_case_ )
UpperCAmelCase_ = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
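# A dependency-free restatement of the staleness rules applied above: issues
# at least 30 days old with more than 23 days of inactivity get the stale
# comment (and are closed 7 days after the bot's last comment). The helper
# below is a hypothetical sketch, not part of the script.
from datetime import datetime, timedelta
from typing import Optional


def should_mark_stale(created_at: datetime, updated_at: datetime, now: Optional[datetime] = None) -> bool:
    now = now or datetime.utcnow()
    return (now - updated_at).days > 23 and (now - created_at).days >= 30


print(should_mark_stale(datetime.utcnow() - timedelta(days=60), datetime.utcnow() - timedelta(days=30)))  # True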
| 78 | '''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" )
UpperCAmelCase_ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ).to(snake_case_ )
return image
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if "visual_encoder" in key:
UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , snake_case_ )
if "blocks" in key:
UpperCAmelCase_ = re.sub(R"blocks" , "layers" , snake_case_ )
if "attn" in key:
UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , snake_case_ )
if "norm1" in key:
UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , snake_case_ )
if "norm2" in key:
UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , snake_case_ )
if "encoder.norm" in key:
UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , snake_case_ )
if "encoder.patch_embed.proj" in key:
UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , snake_case_ )
if "encoder.pos_embed" in key:
UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , snake_case_ )
if "encoder.cls_token" in key:
UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , snake_case_ )
if "self_attn" in key:
UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , snake_case_ )
return key
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any=None ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ )
else:
UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval()
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" )
UpperCAmelCase_ = pt_model.eval()
UpperCAmelCase_ = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
hf_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = 3_84
UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" )
UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids
UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
UpperCAmelCase_ = hf_model.generate(snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase_ = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
vqa_model.eval()
UpperCAmelCase_ = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ )
hf_vqa_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = ["How many dogs are in this image?"]
UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids
UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
itm_model.eval()
UpperCAmelCase_ = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ )
UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"]
UpperCAmelCase_ = tokenizer(
snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case_ )
hf_itm_model.eval()
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 78 | 1 |
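# The converter above renames checkpoint keys with chained `re.sub` calls.
# A toy version of that pattern (these rules are illustrative, not the real
# BLIP mapping):
import re

_RULES = [(r"^visual_encoder", "vision_model.encoder"), (r"\.attn\.", ".self_attn.")]


def rename_key_demo(key: str) -> str:
    for pattern, replacement in _RULES:
        key = re.sub(pattern, replacement, key)
    return key


print(rename_key_demo("visual_encoder.blocks.0.attn.qkv.weight"))
# -> vision_model.encoder.blocks.0.self_attn.qkv.weight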
'''simple docstring'''
# fmt: off
SCREAMING_SNAKE_CASE_: List[str] ={
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
SCREAMING_SNAKE_CASE_: Optional[Any] ={value: key for key, value in MORSE_CODE_DICT.items()}
def lowerCAmelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def lowerCAmelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
UpperCAmelCase_ = "Morse code here!"
print(snake_case_ )
UpperCAmelCase_ = encrypt(snake_case_ )
print(snake_case_ )
UpperCAmelCase_ = decrypt(snake_case_ )
print(snake_case_ )
if __name__ == "__main__":
main()
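# A quick round-trip check of the encode/decode logic above, using a tiny
# subset of the Morse table so the sketch is self-contained:
MORSE_SUBSET = {"S": "...", "O": "---"}
REVERSE_SUBSET = {value: key for key, value in MORSE_SUBSET.items()}

message = "SOS"
encoded = " ".join(MORSE_SUBSET[char] for char in message)  # '... --- ...'
decoded = "".join(REVERSE_SUBSET[token] for token in encoded.split())
assert decoded == message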
| 78 | '''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0.999 , snake_case_ : Tuple="cosine" , ) -> Optional[Any]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case_ : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case_ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCAmelCase_ = []
for i in range(snake_case_ ):
UpperCAmelCase_ = i / num_diffusion_timesteps
UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) )
return torch.tensor(snake_case_ , dtype=torch.floataa )
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
a__ : Tuple = [e.name for e in KarrasDiffusionSchedulers]
a__ : Optional[Any] = 2
@register_to_config
def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ):
if trained_betas is not None:
UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" )
else:
raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
UpperCAmelCase_ = 1.0 - self.betas
UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
UpperCAmelCase_ = use_karras_sigmas
def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ):
if schedule_timesteps is None:
UpperCAmelCase_ = self.timesteps
UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ = 1 if len(__a ) > 1 else 0
else:
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
UpperCAmelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self : List[Any] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ):
UpperCAmelCase_ = num_inference_steps
UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ = np.log(__a )
UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a )
UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ = torch.from_numpy(__a )
UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith("mps" ):
# mps does not support float64
UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa )
else:
UpperCAmelCase_ = timesteps.to(device=__a )
# empty dt and derivative
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ = defaultdict(__a )
def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ):
# get log sigma
UpperCAmelCase_ = np.log(__a )
# get distribution
UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCAmelCase_ = low_idx + 1
UpperCAmelCase_ = log_sigmas[low_idx]
UpperCAmelCase_ = log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ = (low - log_sigma) / (low - high)
UpperCAmelCase_ = np.clip(__a , 0 , 1 )
# transform interpolation to time range
UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ = t.reshape(sigma.shape )
return t
def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ):
UpperCAmelCase_ = in_sigmas[-1].item()
UpperCAmelCase_ = in_sigmas[0].item()
UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper
UpperCAmelCase_ = np.linspace(0 , 1 , __a )
UpperCAmelCase_ = sigma_min ** (1 / rho)
UpperCAmelCase_ = sigma_max ** (1 / rho)
UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self : List[str] ):
return self.dt is None
def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
# advance index counter by 1
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase_ = self.sigmas[step_index - 1]
UpperCAmelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ = 0
UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase_ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`""" )
if self.config.clip_sample:
UpperCAmelCase_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase_ = derivative
UpperCAmelCase_ = dt
UpperCAmelCase_ = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next
UpperCAmelCase_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase_ = self.dt
UpperCAmelCase_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
UpperCAmelCase_ = self.timesteps.to(original_samples.device )
UpperCAmelCase_ = timesteps.to(original_samples.device )
UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps]
UpperCAmelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ = sigma.unsqueeze(-1 )
UpperCAmelCase_ = original_samples + noise * sigma
return noisy_samples
def __len__(self : str ):
return self.config.num_train_timesteps
| 78 | 1 |
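# A torch-free sketch of the cosine schedule computed by `betas_for_alpha_bar`
# above: each beta is one minus the ratio of consecutive alpha-bar values,
# clipped at max_beta.
import math


def cosine_betas(num_steps: int, max_beta: float = 0.999) -> list:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas


print(cosine_betas(4))  # increasing values, with the last clipped to 0.999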
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ) -> tuple:
'''simple docstring'''
UpperCAmelCase_ = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
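# De-obfuscated usage sketch of the helper above (assuming the original
# function name `electric_power`; the body is reproduced so the example
# runs standalone):
from collections import namedtuple


def electric_power(voltage: float, current: float, power: float):
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Exactly one argument must be 0")
    if power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    if voltage == 0:
        return result("voltage", power / current)
    if current == 0:
        return result("current", power / voltage)
    return result("power", float(round(abs(voltage * current), 2)))


print(electric_power(voltage=2, current=3, power=0))  # result(name='power', value=6.0)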
| 78 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( UpperCamelCase__ ):
a__ : List[str] = """Salesforce/blip-image-captioning-base"""
a__ : Optional[Any] = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
a__ : str = """image_captioner"""
a__ : List[str] = AutoModelForVisionaSeq
a__ : int = ["""image"""]
a__ : Optional[Any] = ["""text"""]
def __init__(self : Any , *__a : Dict , **__a : Union[str, Any] ):
requires_backends(self , ["vision"] )
super().__init__(*__a , **__a )
def _lowercase (self : Union[str, Any] , __a : "Image" ):
return self.pre_processor(images=__a , return_tensors="pt" )
def _lowercase (self : List[str] , __a : Dict ):
return self.model.generate(**__a )
def _lowercase (self : int , __a : Optional[Any] ):
return self.pre_processor.batch_decode(__a , skip_special_tokens=__a )[0].strip()
| 78 | 1 |
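# The tool above wraps BLIP's preprocess -> generate -> decode flow. The same
# model can be driven through the high-level pipeline API (a sketch; requires
# transformers, torch, and Pillow, and "photo.jpg" is a placeholder path):
from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner("photo.jpg")[0]["generated_text"])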
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_: Optional[Any] ={
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Any =['PerceiverFeatureExtractor']
SCREAMING_SNAKE_CASE_: Tuple =['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: int =[
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Optional[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
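# A stripped-down illustration of the lazy-import pattern used above:
# attributes resolve to their defining module only on first access. This
# mirrors the intent of `_LazyModule`, not its actual implementation.
import importlib
import types


class LazyModuleDemo(types.ModuleType):
    def __init__(self, name: str, attr_to_module: dict):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


lazy = LazyModuleDemo("demo", {"sqrt": "math"})
print(lazy.sqrt(9.0))  # 3.0 -- `math` is imported only on first access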
| 78 | '''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowerCAmelCase_ ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
'''simple docstring'''
UpperCAmelCase_ = []
if isinstance(snake_case_ , snake_case_ ):
for v in tree.values():
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]:
'''simple docstring'''
UpperCAmelCase_ = []
for d in reversed(snake_case_ ):
idx.append(flat_idx % d )
UpperCAmelCase_ = flat_idx // d
return tuple(reversed(snake_case_ ) )
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
'''simple docstring'''
def reduce_edge_list(snake_case_ : List[bool] ) -> None:
UpperCAmelCase_ = True
for i in range(len(snake_case_ ) ):
UpperCAmelCase_ = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCAmelCase_ = l[reversed_idx]
if start_edges is None:
UpperCAmelCase_ = [s == 0 for s in start]
reduce_edge_list(snake_case_ )
if end_edges is None:
UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )]
reduce_edge_list(snake_case_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(snake_case_ ) == 0:
return [()]
elif len(snake_case_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(snake_case_ , snake_case_ ):
if s == e:
path_list.append(slice(snake_case_ , s + 1 ) )
else:
break
UpperCAmelCase_ = tuple(snake_case_ )
UpperCAmelCase_ = len(snake_case_ )
# start == end, and we're done
if divergence_idx == len(snake_case_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = start[divergence_idx]
return tuple(
path + (slice(snake_case_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = end[divergence_idx]
return tuple(
path + (slice(snake_case_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor:
'''simple docstring'''
UpperCAmelCase_ = t.shape[:no_batch_dims]
UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) )
# _get_minimal_slice_set is inclusive
UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) )
# Get an ordered list of slices to perform
UpperCAmelCase_ = _get_minimal_slice_set(
snake_case_ , snake_case_ , snake_case_ , )
UpperCAmelCase_ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def lowerCAmelCase_ ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any:
'''simple docstring'''
if not (len(snake_case_ ) > 0):
raise ValueError("Must provide at least one input" )
UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )]
UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] )
def _prep_inputs(snake_case_ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ )
UpperCAmelCase_ = None
if _out is not None:
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCAmelCase_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(snake_case_ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCAmelCase_ = 0
UpperCAmelCase_ = prepped_outputs
for _ in range(snake_case_ ):
# Chunk the input
if not low_mem:
UpperCAmelCase_ = _select_chunk
else:
UpperCAmelCase_ = partial(
_chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , )
UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ )
# Run the layer on the chunk
UpperCAmelCase_ = layer(**snake_case_ )
# Allocate space for the output
if out is None:
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ )
# Put the chunk in its pre-allocated space
if isinstance(snake_case_ , snake_case_ ):
def assign(snake_case_ : dict , snake_case_ : dict ) -> None:
for k, v in da.items():
if isinstance(snake_case_ , snake_case_ ):
assign(snake_case_ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCAmelCase_ = da[k]
assign(snake_case_ , snake_case_ )
elif isinstance(snake_case_ , snake_case_ ):
for xa, xa in zip(snake_case_ , snake_case_ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCAmelCase_ = xa
elif isinstance(snake_case_ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCAmelCase_ = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ )
return out
class __A :
def __init__(self : Dict , __a : int = 512 , ):
UpperCAmelCase_ = max_chunk_size
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def _lowercase (self : List[Any] , __a : Callable , __a : tuple , __a : int ):
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size]
UpperCAmelCase_ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__a : int ) -> bool:
try:
with torch.no_grad():
fn(*__a , chunk_size=__a )
return True
except RuntimeError:
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(__a ) - 1
while i > min_viable_chunk_size_index:
UpperCAmelCase_ = test_chunk_size(candidates[i] )
if not viable:
UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2
else:
UpperCAmelCase_ = i
UpperCAmelCase_ = (i + len(__a ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowercase (self : int , __a : Iterable , __a : Iterable ):
UpperCAmelCase_ = True
for aa, aa in zip(__a , __a ):
assert type(__a ) == type(__a )
if isinstance(__a , (list, tuple) ):
consistent &= self._compare_arg_caches(__a , __a )
elif isinstance(__a , __a ):
UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )]
UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )]
consistent &= self._compare_arg_caches(__a , __a )
else:
consistent &= aa == aa
return consistent
def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = tree_map(lambda __a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__a )
UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a )
else:
# Otherwise, we can reuse the precomputed value
UpperCAmelCase_ = False
if not consistent:
UpperCAmelCase_ = self._determine_favorable_chunk_size(
__a , __a , __a , )
UpperCAmelCase_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 78 | 1 |
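# A toy version of the core idea in `chunk_layer` above: run a function over
# a flattened batch in fixed-size slices and concatenate the pieces. The real
# implementation additionally handles nested dicts/tuples, broadcasting, and
# pre-allocated outputs.
import torch


def chunked_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    outs = [fn(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)]
    return torch.cat(outs, dim=0)


x = torch.randn(10, 4)
assert torch.allclose(chunked_apply(torch.sigmoid, x, 3), torch.sigmoid(x))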
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_: Union[str, Any] ={'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: int =[
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =[
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: List[str] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 78 | '''simple docstring'''
import copy
import re
class __A :
a__ : Optional[int] = """hp"""
a__ : Optional[Any] = {}
a__ : List[Any] = None
@classmethod
def _lowercase (cls : Optional[int] , __a : str , __a : Tuple ):
UpperCAmelCase_ = prefix
UpperCAmelCase_ = defaults
cls.build_naming_info()
@staticmethod
def _lowercase (__a : List[Any] , __a : List[str] ):
if len(__a ) == 0:
return ""
UpperCAmelCase_ = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__a ) + 1 ):
UpperCAmelCase_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__a : Union[str, Any] ):
UpperCAmelCase_ = ""
while integer != 0:
UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
UpperCAmelCase_ = 0
while True:
UpperCAmelCase_ = word + "#" + int_to_alphabetic(__a )
if sword in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = sword
break
UpperCAmelCase_ = short_word
UpperCAmelCase_ = word
return short_word
@staticmethod
def _lowercase (__a : List[str] , __a : Union[str, Any] ):
UpperCAmelCase_ = param_name.split("_" )
UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fall back
# to a separated short name
UpperCAmelCase_ = ["", "_"]
for separator in separators:
UpperCAmelCase_ = separator.join(__a )
if shortname not in info["reverse_short_param"]:
UpperCAmelCase_ = shortname
UpperCAmelCase_ = param_name
return shortname
return param_name
@staticmethod
def _lowercase (__a : int , __a : Union[str, Any] ):
UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__a , __a )
UpperCAmelCase_ = short_name
UpperCAmelCase_ = param_name
@classmethod
def _lowercase (cls : Any ):
if cls.NAMING_INFO is not None:
return
UpperCAmelCase_ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
UpperCAmelCase_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__a , __a )
UpperCAmelCase_ = info
@classmethod
def _lowercase (cls : int , __a : Optional[int] ):
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCAmelCase_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k]
if isinstance(__a , __a ):
UpperCAmelCase_ = 1 if v else 0
UpperCAmelCase_ = "" if isinstance(__a , (int, float) ) else "-"
UpperCAmelCase_ = f"""{key}{sep}{v}"""
name.append(__a )
return "_".join(__a )
@classmethod
def _lowercase (cls : Dict , __a : Dict ):
UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCAmelCase_ = []
else:
UpperCAmelCase_ = repr.split("_" )
UpperCAmelCase_ = {}
for value in values:
if "-" in value:
UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" )
else:
UpperCAmelCase_ = re.sub("[0-9.]" , "" , __a )
UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __a ) )
UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k]
UpperCAmelCase_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCAmelCase_ = cls.DEFAULTS[k]
return parameters
| 78 | 1 |
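# Hypothetical usage of the namer above. The obfuscated class refers to itself
# as `TrialShortNamer` internally, so this sketch assumes that original class
# name and the original `shortname`/`parse_repr` method names:
class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}


name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
print(name)                       # 'run_lr0.0001' -- params equal to their defaults are omitted
print(RunNamer.parse_repr(name))  # recovers {'learning_rate': 0.0001, 'batch_size': 32}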
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : List[Any] , *__a : Union[str, Any] , **__a : Tuple ):
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 78 | '''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Tuple = ["""pixel_values"""]
def __init__(self : int , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : int = 8 , **__a : int , ):
super().__init__(**__a )
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = pad_size
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : int , __a : Optional[Union[str, ChannelDimension]] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(__a )
UpperCAmelCase_ = (old_height // size + 1) * size - old_height
UpperCAmelCase_ = (old_width // size + 1) * size - old_width
return pad(__a , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__a )
def _lowercase (self : Tuple , __a : ImageInput , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : List[str] , ):
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_pad if do_pad is not None else self.do_pad
UpperCAmelCase_ = pad_size if pad_size is not None else self.pad_size
UpperCAmelCase_ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_pad:
UpperCAmelCase_ = [self.pad(__a , size=__a ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
| 78 | 1 |
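# A standalone sketch of the pad-to-a-multiple step above: height and width
# are grown to the next multiple of `size` with symmetric padding, matching
# the formula (old // size + 1) * size - old.
import numpy as np


def pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    old_height, old_width = image.shape[:2]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return np.pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric")


print(pad_to_multiple(np.zeros((30, 45))).shape)  # (32, 48)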
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = k_size // 2
UpperCAmelCase_ , UpperCAmelCase_ = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
UpperCAmelCase_ = 1 / (2 * pi * sigma) * exp(-(square(snake_case_ ) + square(snake_case_ )) / (2 * square(snake_case_ )) )
return g
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : str , snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[0], image.shape[1]
# dst image height and width
UpperCAmelCase_ = height - k_size + 1
UpperCAmelCase_ = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
UpperCAmelCase_ = zeros((dst_height * dst_width, k_size * k_size) )
UpperCAmelCase_ = 0
for i, j in product(range(snake_case_ ) , range(snake_case_ ) ):
UpperCAmelCase_ = ravel(image[i : i + k_size, j : j + k_size] )
UpperCAmelCase_ = window
row += 1
# turn the kernel into shape(k*k, 1)
UpperCAmelCase_ = gen_gaussian_kernel(snake_case_ , snake_case_ )
UpperCAmelCase_ = ravel(snake_case_ )
# reshape and get the dst image
UpperCAmelCase_ = dot(snake_case_ , snake_case_ ).reshape(snake_case_ , snake_case_ ).astype(snake_case_ )
return dst
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE_: Any =imread(r'../image_data/lena.jpg')
# turn image in gray scale value
SCREAMING_SNAKE_CASE_: Union[str, Any] =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
SCREAMING_SNAKE_CASE_: int =gaussian_filter(gray, 3, sigma=1)
SCREAMING_SNAKE_CASE_: Any =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
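# A standalone check of the kernel construction above (NumPy only). Note that
# the snippet's constant is 1/(2*pi*sigma) rather than the textbook
# 1/(2*pi*sigma**2), so the kernel does not sum to 1; divide by kernel.sum()
# to normalize before convolving.
import numpy as np


def gen_gaussian_kernel_demo(k_size: int, sigma: float) -> np.ndarray:
    center = k_size // 2
    x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    return 1 / (2 * np.pi * sigma) * np.exp(-(np.square(x) + np.square(y)) / (2 * np.square(sigma)))


kernel = gen_gaussian_kernel_demo(3, sigma=1.0)
print(kernel.round(3))
print(kernel.sum())  # ~0.78 for a 3x3, sigma=1 kernel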
| 78 | '''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
SCREAMING_SNAKE_CASE_: Dict =[
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv."
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.'
SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase_ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"encoder.down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: int =f"encoder.down.{i}.block.{j}."
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
SCREAMING_SNAKE_CASE_: int =f"down_blocks.{i}.downsamplers.0."
SCREAMING_SNAKE_CASE_: str =f"down.{i}.downsample."
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[str] =f"up.{3-i}.upsample."
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
SCREAMING_SNAKE_CASE_: List[str] =f"decoder.up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Dict =f"decoder.up.{3-i}.block.{j}."
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
SCREAMING_SNAKE_CASE_: Any =f"mid_block.resnets.{i}."
SCREAMING_SNAKE_CASE_: Tuple =f"mid.block_{i+1}."
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Tuple:
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase_ = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
SCREAMING_SNAKE_CASE_: Dict ={re.escape(x[1]): x[0] for x in textenc_conversion_lst}
SCREAMING_SNAKE_CASE_: str =re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
SCREAMING_SNAKE_CASE_: List[Any] ={'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    '''Convert an HF CLIP text-encoder state dict to the open_clip (SD 2.x) layout.'''
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # the single character "q", "k" or "v"
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
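# Illustrative note: open_clip keeps attention q/k/v fused as single in_proj tensors,
# so the three captured HF projections are concatenated in q, k, v order (code2idx);
# e.g. three hypothetical [1024, 1024] weights become one [3072, 1024] in_proj_weight.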
def convert_text_enc_state_dict(text_enc_dict):
    '''SD 1.x text encoders need no key changes.'''
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )
    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    text_enc_path = osp.join(args.model_path, 'text_encoder', 'model.safetensors')

    # Load models from safetensors if they exist, otherwise from PyTorch .bin files
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='cpu')
    else:
        unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        unet_state_dict = torch.load(unet_path, map_location='cpu')

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='cpu')
    else:
        vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        vae_state_dict = torch.load(vae_path, map_location='cpu')

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='cpu')
    else:
        text_enc_path = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        text_enc_dict = torch.load(text_enc_path, map_location='cpu')

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
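    # Illustrative note: the SD 1.x CLIP text encoder has 12 transformer layers, so a
    # key for layer index 22 can only come from the deeper SD 2.x OpenCLIP encoder.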
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 78 | 1 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
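# Illustrative note: the aliases above let `pickle` resolve classes serialized under
# the legacy "data_utils" / "vocabulary" module paths to the current `transformers`
# implementations when the corpus pickle is loaded below.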
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    '''Convert a Transformer-XL corpus pickle and/or TF checkpoint to PyTorch files.'''
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--tf_checkpoint_path',
        default='',
        type=str,
        help='An optional path to a TensorFlow checkpoint path to be converted.',
    )
    parser.add_argument(
        '--transfo_xl_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--transfo_xl_dataset_file',
        default='',
        type=str,
        help='An optional dataset file to be converted in a vocabulary.',
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 78 | '''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    '''Return the squared Euclidean norm of a vector.'''
    return np.dot(vector, vector)
class SVC:
    '''Support vector classifier trained by maximizing Wolfe's dual problem.'''

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #   and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
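        # Compact restatement (added for clarity): with labels y_n in {-1, +1},
        # multipliers l_n and kernel K, the dual solved below is
        #   maximize    sum_n l_n - 1/2 * sum_{n,m} l_n * l_m * y_n * y_m * K(x_n, x_m)
        #   subject to  0 <= l_n <= C  and  sum_n l_n * y_n = 0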
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
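# Minimal usage sketch (illustrative; data and expected output are hypothetical):
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear")
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 1.5]))   # expected: 1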
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 | 1 |
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    '''Read the given file as raw bytes and return them as a string of bits.'''
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    '''Add the new strings (curr_string + "0", curr_string + "1") to the lexicon.'''
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
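# Illustrative note: whenever `index` reaches a power of two, bin(index) gains a bit,
# so every stored code is left-padded with "0" to keep all codes at the new width
# (e.g. at index == 4 a stored code "10" becomes "010").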
def compress_data(data_bits: str) -> str:
    '''Compress the given string of bits using the Lempel-Ziv algorithm.'''
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    '''Prefix the compressed stream with a header encoding the source file length.'''
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    '''Write the given bit string to the file, padded out to whole bytes.'''
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    '''Read, compress and write out the file at source_path.'''
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 78 | '''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 78 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase_ ( ) -> Any:
'''simple docstring'''
raise RuntimeError("CUDA out of memory." )
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class __A ( unittest.TestCase ):
    def _lowercase (self : int ):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
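    # Note: find_executable_batch_size retries the decorated function, halving the
    # starting batch size after each simulated CUDA OOM until a call succeeds
    # (here 128 -> 64 -> 32 -> 16 -> 8).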
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__a : str , __a : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCAmelCase_ , UpperCAmelCase_ = mock_training_loop_function("hello" )
self.assertListEqual(__a , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, "hello"] )
def _lowercase (self : Any ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Tuple ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def _lowercase (self : Optional[int] ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__a : int ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def _lowercase (self : List[Any] ):
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__a : Tuple , __a : Dict , __a : Union[str, Any] ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(128 , "hello" , "world" )
self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )
def _lowercase (self : Any ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError("Oops, we had an error!" )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!" , cm.exception.args[0] )
@require_cuda
def _lowercase (self : Any ):
UpperCAmelCase_ = torch.cuda.memory_allocated()
UpperCAmelCase_ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __a )
UpperCAmelCase_ = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated() , __a )
| 78 | '''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    '''POST a message to a Slack incoming-webhook URL.'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
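# Illustrative note: Slack incoming webhooks expect a JSON body of the form
# {"text": "..."} sent with Content-Type: application/json; any non-200 status is
# surfaced as a ValueError above.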
if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 78 | 1 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    '''Find a real root of func (given as a string in x) near the initial guess a.'''
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
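# The loop above applies the classic Newton-Raphson update
#   x_{n+1} = x_n - f(x_n) / f'(x_n)
# with f supplied as a string in x and differentiated symbolically via sympy.diff.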
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of log(x) = 1, i.e. x = e
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 78 | '''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __A (DiffusionPipeline):
    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")

        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
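        # Note: the CLIPSeg logits are squashed through a sigmoid into a soft [0, 1]
        # mask and resized to the input image size so they can drive the inpainting
        # call below.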
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
| 78 | 1 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
SCREAMING_SNAKE_CASE_: List[Any] ='docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    '''Clean one section of the table of content: de-duplicate entries and sort them by title.'''
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
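# Note: the returned section keeps the single "overview" entry first and appends the
# remaining de-duplicated entries sorted case-insensitively by title.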
def lowerCAmelCase_ ( snake_case_ : Tuple=False ) -> Any:
'''simple docstring'''
with open(snake_case_ , encoding="utf-8" ) as f:
UpperCAmelCase_ = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase_ = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase_ = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
UpperCAmelCase_ = api_doc[scheduler_idx]["sections"]
UpperCAmelCase_ = clean_doc_toc(snake_case_ )
UpperCAmelCase_ = False
if new_scheduler_doc != scheduler_doc:
UpperCAmelCase_ = True
if overwrite:
UpperCAmelCase_ = new_scheduler_doc
if diff:
if overwrite:
UpperCAmelCase_ = api_doc
with open(snake_case_ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(snake_case_ , allow_unicode=snake_case_ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def lowerCAmelCase_ ( snake_case_ : Optional[int]=False ) -> List[str]:
'''simple docstring'''
with open(snake_case_ , encoding="utf-8" ) as f:
UpperCAmelCase_ = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase_ = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase_ = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
UpperCAmelCase_ = False
UpperCAmelCase_ = api_doc[pipeline_idx]["sections"]
UpperCAmelCase_ = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
UpperCAmelCase_ = pipeline_doc["section"]
UpperCAmelCase_ = clean_doc_toc(snake_case_ )
if overwrite:
UpperCAmelCase_ = new_sub_pipeline_doc
new_pipeline_docs.append(snake_case_ )
# sort overall pipeline doc
UpperCAmelCase_ = clean_doc_toc(snake_case_ )
if new_pipeline_docs != pipeline_docs:
UpperCAmelCase_ = True
if overwrite:
UpperCAmelCase_ = new_pipeline_docs
if diff:
if overwrite:
UpperCAmelCase_ = api_doc
with open(snake_case_ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(snake_case_ , allow_unicode=snake_case_ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
SCREAMING_SNAKE_CASE_: Tuple =parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 78 | '''simple docstring'''
def is_power_of_two(number: int) -> bool:
    '''Check whether the number is a power of two using the n & (n - 1) trick.'''
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
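# Worked example: 8 is 0b1000 and 7 is 0b0111, so 8 & 7 == 0 and the check passes.
# Note that 0 & -1 == 0 as well, so 0 is also accepted; callers needing a strict
# power of two should additionally check number != 0.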
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    '''Map a ParlAI state-dict key to its Hugging Face equivalent.'''
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
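# Worked example (illustrative):
#   "encoder.layers.0.attention.q_lin.weight"
#     -> "encoder.layers.0.self_attn.q_proj.weight"
# via the PATTERNS table plus the encoder-specific ".attn" -> ".self_attn" rename.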
def rename_layernorm_keys(sd):
    '''Rename layernorm_embedding -> layer_norm in place (Blenderbot-3B checkpoints).'''
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    '''Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF format.'''
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
    parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
    parser.add_argument(
        '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 78 | '''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins(root: TreeNode | None) -> int:
    '''Return the minimum number of moves so every node of the tree holds one coin.'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
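# Worked example (illustrative): for TreeNode(3, TreeNode(0), TreeNode(0)) the root
# sends one coin to each child, so distribute_coins returns 2; every move across an
# edge is counted via abs(coins_to_left) + abs(coins_to_right).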
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
def _lowercase (self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__a , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
UpperCAmelCase_ = ClapTextModelWithProjection(__a )
UpperCAmelCase_ = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
UpperCAmelCase_ = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__a , )
UpperCAmelCase_ = SpeechTaHifiGan(__a )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def _lowercase (self : int , __a : Optional[Any] , __a : Dict=0 ):
if str(__a ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(__a )
else:
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
UpperCAmelCase_ = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = AudioLDMPipeline(**__a )
UpperCAmelCase_ = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = audioldm_pipe(**__a )
UpperCAmelCase_ = output.audios[0]
assert audio.ndim == 1
assert len(__a ) == 256
UpperCAmelCase_ = audio[:10]
UpperCAmelCase_ = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _lowercase (self : str ):
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = AudioLDMPipeline(**__a )
UpperCAmelCase_ = audioldm_pipe.to(__a )
UpperCAmelCase_ = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ = audioldm_pipe(**__a )
UpperCAmelCase_ = output.audios[0]
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ = audioldm_pipe.tokenizer(
__a , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
UpperCAmelCase_ = text_inputs["input_ids"].to(__a )
UpperCAmelCase_ = audioldm_pipe.text_encoder(
__a , )
UpperCAmelCase_ = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ = F.normalize(__a , dim=-1 )
UpperCAmelCase_ = prompt_embeds
# forward
UpperCAmelCase_ = audioldm_pipe(**__a )
UpperCAmelCase_ = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = AudioLDMPipeline(**__a )
UpperCAmelCase_ = audioldm_pipe.to(__a )
UpperCAmelCase_ = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = 3 * ["this is a negative prompt"]
UpperCAmelCase_ = negative_prompt
UpperCAmelCase_ = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ = audioldm_pipe(**__a )
UpperCAmelCase_ = output.audios[0]
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ = audioldm_pipe.tokenizer(
__a , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
UpperCAmelCase_ = text_inputs["input_ids"].to(__a )
UpperCAmelCase_ = audioldm_pipe.text_encoder(
__a , )
UpperCAmelCase_ = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ = F.normalize(__a , dim=-1 )
embeds.append(__a )
UpperCAmelCase_ , UpperCAmelCase_ = embeds
# forward
UpperCAmelCase_ = audioldm_pipe(**__a )
UpperCAmelCase_ = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _lowercase (self : int ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = PNDMScheduler(skip_prk_steps=__a )
UpperCAmelCase_ = AudioLDMPipeline(**__a )
UpperCAmelCase_ = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = "egg cracking"
UpperCAmelCase_ = audioldm_pipe(**__a , negative_prompt=__a )
UpperCAmelCase_ = output.audios[0]
assert audio.ndim == 1
assert len(__a ) == 256
UpperCAmelCase_ = audio[:10]
UpperCAmelCase_ = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _lowercase (self : Tuple ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = PNDMScheduler(skip_prk_steps=__a )
UpperCAmelCase_ = AudioLDMPipeline(**__a )
UpperCAmelCase_ = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ = audioldm_pipe(__a , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ = 2
UpperCAmelCase_ = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ = 2
UpperCAmelCase_ = audioldm_pipe(__a , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ = 2
UpperCAmelCase_ = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def _lowercase (self : Dict ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = AudioLDMPipeline(**__a )
UpperCAmelCase_ = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = audioldm_pipe(audio_length_in_s=0.0_16 , **__a )
UpperCAmelCase_ = output.audios[0]
assert audio.ndim == 1
assert len(__a ) / vocoder_sampling_rate == 0.0_16
UpperCAmelCase_ = audioldm_pipe(audio_length_in_s=0.0_32 , **__a )
UpperCAmelCase_ = output.audios[0]
assert audio.ndim == 1
assert len(__a ) / vocoder_sampling_rate == 0.0_32
def _lowercase (self : str ):
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = AudioLDMPipeline(**__a )
UpperCAmelCase_ = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = ["hey"]
UpperCAmelCase_ = audioldm_pipe(__a , num_inference_steps=1 )
UpperCAmelCase_ = output.audios.shape
assert audio_shape == (1, 256)
UpperCAmelCase_ = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ = SpeechTaHifiGan(__a ).to(__a )
UpperCAmelCase_ = audioldm_pipe(__a , num_inference_steps=1 )
UpperCAmelCase_ = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def _lowercase (self : Dict ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a )
def _lowercase (self : int ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=__a )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowercase (self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a )
@slow
class __A ( unittest.TestCase ):
def _lowercase (self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Optional[int] , __a : Dict , __a : Optional[Any]="cpu" , __a : Union[str, Any]=torch.floataa , __a : Union[str, Any]=0 ):
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
UpperCAmelCase_ = np.random.RandomState(__a ).standard_normal((1, 8, 128, 16) )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a , dtype=__a )
UpperCAmelCase_ = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def _lowercase (self : int ):
UpperCAmelCase_ = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_inputs(__a )
UpperCAmelCase_ = 25
UpperCAmelCase_ = audioldm_pipe(**__a ).audios[0]
assert audio.ndim == 1
assert len(__a ) == 81920
UpperCAmelCase_ = audio[77230:77240]
UpperCAmelCase_ = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
UpperCAmelCase_ = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def _lowercase (self : Tuple ):
UpperCAmelCase_ = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_inputs(__a )
UpperCAmelCase_ = audioldm_pipe(**__a ).audios[0]
assert audio.ndim == 1
assert len(__a ) == 81920
UpperCAmelCase_ = audio[27780:27790]
UpperCAmelCase_ = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
UpperCAmelCase_ = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 78 | '''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
parser = argparse.ArgumentParser()
parser.add_argument("-f" )
args = parser.parse_args()
return args.f
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = os.path.join(snake_case_ , "all_results.json" )
if os.path.exists(snake_case_ ):
with open(snake_case_ , "r" ) as f:
UpperCAmelCase_ = json.load(snake_case_ )
else:
raise ValueError(f"""can't find {path}""" )
return results
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
@classmethod
def _lowercase (cls : Any ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
cls.tmpdir = tempfile.mkdtemp()
cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
write_basic_config(save_location=cls.configPath)
cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _lowercase (cls : int ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertLess(result["perplexity"] , 100 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : str ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
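
# Sketch of the `get_results` helper the assertions above rely on; the real one is defined
# elsewhere in the test module. The file name "all_results.json" is an assumption based on
# what the example scripts write at the end of training, so this is illustrative only.
def _get_results_sketch(output_dir):
    import json

    with open(os.path.join(output_dir, "all_results.json")) as f:
        return json.load(f)  # e.g. {"perplexity": 41.2, "eval_accuracy": 0.82, ...}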
| 78 | 1 |
"""Official evaluation script for SQuAD version 2.0.

In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
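
# Worked example (illustrative): with gold "the cat sat" and prediction "cat sat down",
# normalization drops the article, leaving gold tokens ["cat", "sat"] and prediction
# tokens ["cat", "sat", "down"]. The overlap is 2, so precision = 2/3, recall = 2/2 = 1.0,
# and F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.
assert abs(compute_f1("the cat sat", "cat sat down") - 0.8) < 1e-9
assert compute_exact("The cat sat.", "the cat sat") == 1  # punctuation and case are ignored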
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
if out_image_dir and not os.path.exists(snake_case_ ):
os.makedirs(snake_case_ )
UpperCAmelCase_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
UpperCAmelCase_ = make_precision_recall_eval(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , out_image=os.path.join(snake_case_ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
UpperCAmelCase_ = make_precision_recall_eval(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , out_image=os.path.join(snake_case_ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
UpperCAmelCase_ = {k: float(snake_case_ ) for k, v in qid_to_has_ans.items()}
UpperCAmelCase_ = make_precision_recall_eval(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , out_image=os.path.join(snake_case_ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(snake_case_ , snake_case_ , "pr_exact" )
merge_eval(snake_case_ , snake_case_ , "pr_f1" )
merge_eval(snake_case_ , snake_case_ , "pr_oracle" )
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
if not qid_list:
return
UpperCAmelCase_ = [na_probs[k] for k in qid_list]
UpperCAmelCase_ = np.ones_like(snake_case_ ) / float(len(snake_case_ ) )
plt.hist(snake_case_ , weights=snake_case_ , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(snake_case_ , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCAmelCase_ = num_no_ans
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = sorted(snake_case_ , key=lambda snake_case_ : na_probs[k] )
for i, qid in enumerate(snake_case_ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCAmelCase_ = scores[qid]
else:
if preds[qid]:
UpperCAmelCase_ = -1
else:
UpperCAmelCase_ = 0
cur_score += diff
if cur_score > best_score:
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = na_probs[qid]
return 100.0 * best_score / len(snake_case_ ), best_thresh
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = best_exact
UpperCAmelCase_ = exact_thresh
UpperCAmelCase_ = best_fa
UpperCAmelCase_ = fa_thresh
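
# Intuition for the threshold sweep above: questions are visited in order of increasing
# no-answer probability. Starting from the score obtained by abstaining on everything
# (num_no_ans), each step "un-abstains" on one more question and the running score is
# updated; the best running score fixes the threshold. A toy illustration (hypothetical
# qids and probabilities, not from any real prediction file):
#
#   preds = {"q1": "some span", "q2": ""}
#   exact_raw = {"q1": 1, "q2": 0}
#   na_probs = {"q1": 0.1, "q2": 0.9}
#   qid_to_has_ans = {"q1": True, "q2": False}
#   find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)  # -> (100.0, 0.1)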
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
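
# Example invocation (the script file name and all paths are placeholders):
#
#   python evaluate_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_prob.json --out-file eval.json --out-image-dir out_images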
| 78 | '''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index, highlighting it if selected."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Should not be directly called, used to move a direction of either up or down."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        """Start the menu and return the selected choice."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
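
# Usage sketch (needs an interactive terminal, so left commented; `BulletMenu` is the
# class name restored above and the prompt/choices are illustrative):
#
#   menu = BulletMenu("Which compute environment are you using?", ["This machine", "AWS (Amazon SageMaker)"])
#   index = menu.run(default_choice=0)  # returns the selected index, e.g. 0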
| 78 | 1 |
""" EfficientNet model configuration"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
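
# Usage sketch: the defaults above correspond to EfficientNet-B7-scale hyperparameters,
# so instantiating the config without arguments and reading the derived depth works as:
#
#   config = EfficientNetConfig()
#   assert config.num_hidden_layers == sum(config.num_block_repeats) * 4  # 64 with the defaults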
| 78 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
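
    # Note: with this pattern, `import transformers.models.beit` stays cheap. The heavy
    # torch/flax submodules listed above are only imported when one of their attributes
    # is first accessed (e.g. `beit.BeitModel`), via _LazyModule's __getattr__ hook.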
| 78 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 78 | '''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """A bunch of argument sanity checks to perform before kicking off training."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument("--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument("--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.")
    parser.add_argument("--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).")
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument("--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument("--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument("--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument("--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.")
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument("--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument("--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument("--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.")
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument("--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).")
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument("--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution.")
    parser.add_argument("--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.")
    parser.add_argument("--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.")
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument("--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.")
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument("--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument("--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
# ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)
        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")
        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
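
# Example invocation (all paths, file names, and loss weights are placeholders, loosely
# following the research project's README; adjust to your own binarized data):
#
#   python train.py --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 \
#       --alpha_cos 1.0 --mlm_mask_prop 0.15 --dump_path serialization_dir/my_first_training \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle --force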
| 78 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            # seq length * 3 as there are 3 modalities: states, returns and actions
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_preds, action_preds, return_preds = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_preds.shape, actions.shape)
            self.assertTrue(torch.allclose(action_preds[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_preds[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
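
    # Note on the loop above: each iteration appends a zero-padded action/reward slot,
    # queries the model for the next action, then feeds back a (random) next state and a
    # decremented return-to-go, mimicking one environment step. With return_dict=True the
    # same outputs are available as attributes instead of a tuple, e.g.:
    #
    #   out = model(states=states, actions=actions, rewards=rewards,
    #               returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask)
    #   out.state_preds, out.action_preds, out.return_preds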
| 78 | '''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
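
    # Why the comparison above holds: gradient checkpointing only changes *when*
    # activations are materialized (they are recomputed during the backward pass instead
    # of being stored), not the math itself, so with deterministic kernels the loss and
    # every parameter gradient should match the non-checkpointed run up to tiny numerical noise.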
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
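
    # Usage sketch for the helpers above (needs a CUDA machine with the weights cached,
    # so left commented; names follow the de-obfuscated code in this class):
    #
    #   model = self.get_sd_vae_model(fp16=True)       # fp16 VAE from the "fp16" revision
    #   image = self.get_sd_image(seed=33, fp16=True)  # matching fp16 noise tensor
    #   with torch.no_grad():
    #       sample = model(image, generator=self.get_generator(33), sample_posterior=True).sample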
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 78 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
UpperCAmelCase_ = tempfile.mkdtemp()
def _lowercase (self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def _lowercase (self : int , __a : Dict=0 , __a : str=0 , __a : Any=64 , __a : Any=64 , __a : List[str]=None , __a : Optional[int]=False , **__a : List[str] ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the logging level. We make sure
        # it's set to False here because the tests below depend on its value.
UpperCAmelCase_ = RegressionDataset(length=__a )
UpperCAmelCase_ = RegressionDataset(length=__a )
UpperCAmelCase_ = RegressionModelConfig(a=__a , b=__a )
UpperCAmelCase_ = RegressionPreTrainedModel(__a )
UpperCAmelCase_ = TrainingArguments(self.output_dir , disable_tqdm=__a , report_to=[] , **__a )
return Trainer(
__a , __a , train_dataset=__a , eval_dataset=__a , callbacks=__a , )
def _lowercase (self : List[Any] , __a : Dict , __a : List[Any] ):
self.assertEqual(len(__a ) , len(__a ) )
# Order doesn't matter
UpperCAmelCase_ = sorted(__a , key=lambda __a : cb.__name__ if isinstance(__a , __a ) else cb.__class__.__name__ )
UpperCAmelCase_ = sorted(__a , key=lambda __a : cb.__name__ if isinstance(__a , __a ) else cb.__class__.__name__ )
for cba, cba in zip(__a , __a ):
if isinstance(__a , __a ) and isinstance(__a , __a ):
self.assertEqual(__a , __a )
elif isinstance(__a , __a ) and not isinstance(__a , __a ):
self.assertEqual(__a , cba.__class__ )
elif not isinstance(__a , __a ) and isinstance(__a , __a ):
self.assertEqual(cba.__class__ , __a )
else:
self.assertEqual(__a , __a )
def _lowercase (self : Tuple , __a : Optional[Any] ):
UpperCAmelCase_ = ["on_init_end", "on_train_begin"]
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(trainer.get_eval_dataloader() )
UpperCAmelCase_ = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(__a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.get_trainer()
UpperCAmelCase_ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __a )
# Callbacks passed at init are added to the default callbacks
UpperCAmelCase_ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __a )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
UpperCAmelCase_ = self.get_trainer(disable_tqdm=__a )
UpperCAmelCase_ = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __a )
def _lowercase (self : int ):
UpperCAmelCase_ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
UpperCAmelCase_ = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__a )
expected_callbacks.remove(__a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __a )
UpperCAmelCase_ = self.get_trainer()
UpperCAmelCase_ = trainer.pop_callback(__a )
self.assertEqual(cb.__class__ , __a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __a )
trainer.add_callback(__a )
expected_callbacks.insert(0 , __a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __a )
# We can also add, pop, or remove by instance
UpperCAmelCase_ = self.get_trainer()
UpperCAmelCase_ = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__a )
expected_callbacks.remove(__a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __a )
UpperCAmelCase_ = self.get_trainer()
UpperCAmelCase_ = trainer.callback_handler.callbacks[0]
UpperCAmelCase_ = trainer.pop_callback(__a )
self.assertEqual(__a , __a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __a )
trainer.add_callback(__a )
expected_callbacks.insert(0 , __a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __a )
def _lowercase (self : List[Any] ):
import warnings
        # XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=__a )
UpperCAmelCase_ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
UpperCAmelCase_ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__a , self.get_expected_events(__a ) )
# Independent log/save/eval
UpperCAmelCase_ = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
UpperCAmelCase_ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__a , self.get_expected_events(__a ) )
UpperCAmelCase_ = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
UpperCAmelCase_ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__a , self.get_expected_events(__a ) )
UpperCAmelCase_ = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
UpperCAmelCase_ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__a , self.get_expected_events(__a ) )
UpperCAmelCase_ = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
UpperCAmelCase_ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__a , self.get_expected_events(__a ) )
# A bit of everything
UpperCAmelCase_ = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
UpperCAmelCase_ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__a , self.get_expected_events(__a ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
UpperCAmelCase_ = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__a ) in warn_mock.call_args[0][0]
| 78 | '''simple docstring'''
import logging
from transformers import PretrainedConfig
SCREAMING_SNAKE_CASE_: Any =logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_: Any ={
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """bertabs"""
def __init__(self : Any , __a : int=30522 , __a : Tuple=512 , __a : Tuple=6 , __a : Dict=512 , __a : int=8 , __a : List[Any]=512 , __a : List[str]=0.2 , __a : List[Any]=6 , __a : int=768 , __a : Any=8 , __a : Dict=2048 , __a : Tuple=0.2 , **__a : Optional[int] , ):
super().__init__(**__a )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_pos
UpperCAmelCase_ = enc_layers
UpperCAmelCase_ = enc_hidden_size
UpperCAmelCase_ = enc_heads
UpperCAmelCase_ = enc_ff_size
UpperCAmelCase_ = enc_dropout
UpperCAmelCase_ = dec_layers
UpperCAmelCase_ = dec_hidden_size
UpperCAmelCase_ = dec_heads
UpperCAmelCase_ = dec_ff_size
UpperCAmelCase_ = dec_dropout
| 78 | 1 |
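The first snippet in the row above records every Trainer lifecycle event through a TrainerCallback subclass and later compares the recorded sequence against the expected one. A minimal hedged sketch of that pattern is below; the class name and the subset of recorded events are illustrative, while the hook signatures follow the public transformers.TrainerCallback API.

from transformers import TrainerCallback

class EventRecorderCallback(TrainerCallback):
    """Records which Trainer lifecycle hooks fire, in order."""

    def __init__(self):
        self.events = []

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

# Usage sketch: pass the callback at construction time, train, then inspect
# the recorded sequence, e.g.
#   trainer = Trainer(model, args, train_dataset=ds, callbacks=[EventRecorderCallback()])
#   trainer.train()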
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __A ( UpperCamelCase__ ):
a__ : BigBirdConfig
a__ : jnp.dtype = jnp.floataa
a__ : bool = True
def _lowercase (self : Dict ):
super().setup()
UpperCAmelCase_ = nn.Dense(5 , dtype=self.dtype )
def __call__(self : Optional[Any] , *__a : Tuple , **__a : List[Any] ):
UpperCAmelCase_ = super().__call__(*__a , **__a )
UpperCAmelCase_ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __A ( UpperCamelCase__ ):
a__ : str = FlaxBigBirdForNaturalQuestionsModule
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : List[str] ) -> str:
'''simple docstring'''
def cross_entropy(snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Optional[Any]=None ):
UpperCAmelCase_ = logits.shape[-1]
UpperCAmelCase_ = (labels[..., None] == jnp.arange(snake_case_ )[None]).astype("f4" )
UpperCAmelCase_ = jax.nn.log_softmax(snake_case_ , axis=-1 )
UpperCAmelCase_ = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
UpperCAmelCase_ = reduction(snake_case_ )
return loss
UpperCAmelCase_ = partial(snake_case_ , reduction=jnp.mean )
UpperCAmelCase_ = cross_entropy(snake_case_ , snake_case_ )
UpperCAmelCase_ = cross_entropy(snake_case_ , snake_case_ )
UpperCAmelCase_ = cross_entropy(snake_case_ , snake_case_ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __A :
a__ : str = "google/bigbird-roberta-base"
a__ : int = 3_000
a__ : int = 10_500
a__ : int = 128
a__ : int = 3
a__ : int = 1
a__ : int = 5
# tx_args
a__ : float = 3e-5
a__ : float = 0.0
a__ : int = 20_000
a__ : float = 0.0_0_9_5
a__ : str = "bigbird-roberta-natural-questions"
a__ : str = "training-expt"
a__ : str = "data/nq-training.jsonl"
a__ : str = "data/nq-validation.jsonl"
def _lowercase (self : str ):
os.makedirs(self.base_dir , exist_ok=__a )
UpperCAmelCase_ = os.path.join(self.base_dir , self.save_dir )
UpperCAmelCase_ = self.batch_size_per_device * jax.device_count()
@dataclass
class __A :
a__ : int
a__ : int = 4_096 # no dynamic padding on TPUs
def __call__(self : List[Any] , __a : Any ):
UpperCAmelCase_ = self.collate_fn(__a )
UpperCAmelCase_ = jax.tree_util.tree_map(__a , __a )
return batch
def _lowercase (self : Tuple , __a : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.fetch_inputs(features["input_ids"] )
UpperCAmelCase_ = {
"input_ids": jnp.array(__a , dtype=jnp.intaa ),
"attention_mask": jnp.array(__a , dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def _lowercase (self : str , __a : list ):
UpperCAmelCase_ = [self._fetch_inputs(__a ) for ids in input_ids]
return zip(*__a )
def _lowercase (self : List[Any] , __a : list ):
UpperCAmelCase_ = [1 for _ in range(len(__a ) )]
while len(__a ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any , snake_case_ : List[Any]=None ) -> Any:
'''simple docstring'''
if seed is not None:
UpperCAmelCase_ = dataset.shuffle(seed=snake_case_ )
for i in range(len(snake_case_ ) // batch_size ):
UpperCAmelCase_ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(snake_case_ )
@partial(jax.pmap , axis_name="batch" )
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] , **snake_case_ : Any ) -> Optional[int]:
'''simple docstring'''
def loss_fn(snake_case_ : Tuple ):
UpperCAmelCase_ = model_inputs.pop("start_labels" )
UpperCAmelCase_ = model_inputs.pop("end_labels" )
UpperCAmelCase_ = model_inputs.pop("pooled_labels" )
UpperCAmelCase_ = state.apply_fn(**snake_case_ , params=snake_case_ , dropout_rng=snake_case_ , train=snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = outputs
return state.loss_fn(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
UpperCAmelCase_ , UpperCAmelCase_ = jax.random.split(snake_case_ )
UpperCAmelCase_ = jax.value_and_grad(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = grad_fn(state.params )
UpperCAmelCase_ = jax.lax.pmean({"loss": loss} , axis_name="batch" )
UpperCAmelCase_ = jax.lax.pmean(snake_case_ , "batch" )
UpperCAmelCase_ = state.apply_gradients(grads=snake_case_ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def lowerCAmelCase_ ( snake_case_ : List[str] , **snake_case_ : Any ) -> str:
'''simple docstring'''
UpperCAmelCase_ = model_inputs.pop("start_labels" )
UpperCAmelCase_ = model_inputs.pop("end_labels" )
UpperCAmelCase_ = model_inputs.pop("pooled_labels" )
UpperCAmelCase_ = state.apply_fn(**snake_case_ , params=state.params , train=snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = outputs
UpperCAmelCase_ = state.loss_fn(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class __A ( train_state.TrainState ):
a__ : Callable = struct.field(pytree_node=UpperCamelCase__ )
@dataclass
class __A :
a__ : Args
a__ : Callable
a__ : Callable
a__ : Callable
a__ : Callable
a__ : wandb
a__ : Callable = None
def _lowercase (self : str , __a : Union[str, Any] , __a : List[Any] , __a : Any , __a : int=None ):
UpperCAmelCase_ = model.params
UpperCAmelCase_ = TrainState.create(
apply_fn=model.__call__ , params=__a , tx=__a , loss_fn=__a , )
if ckpt_dir is not None:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = restore_checkpoint(__a , __a )
UpperCAmelCase_ = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
UpperCAmelCase_ , UpperCAmelCase_ = build_tx(**__a )
UpperCAmelCase_ = train_state.TrainState(
step=__a , apply_fn=model.__call__ , params=__a , tx=__a , opt_state=__a , )
UpperCAmelCase_ = args
UpperCAmelCase_ = data_collator
UpperCAmelCase_ = lr
UpperCAmelCase_ = params
UpperCAmelCase_ = jax_utils.replicate(__a )
return state
def _lowercase (self : Tuple , __a : Dict , __a : str , __a : Any ):
UpperCAmelCase_ = self.args
UpperCAmelCase_ = len(__a ) // args.batch_size
UpperCAmelCase_ = jax.random.PRNGKey(0 )
UpperCAmelCase_ = jax.random.split(__a , jax.device_count() )
for epoch in range(args.max_epochs ):
UpperCAmelCase_ = jnp.array(0 , dtype=jnp.floataa )
UpperCAmelCase_ = get_batched_dataset(__a , args.batch_size , seed=__a )
UpperCAmelCase_ = 0
for batch in tqdm(__a , total=__a , desc=f"""Running EPOCH-{epoch}""" ):
UpperCAmelCase_ = self.data_collator(__a )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.train_step_fn(__a , __a , **__a )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
UpperCAmelCase_ = jax_utils.unreplicate(state.step )
UpperCAmelCase_ = running_loss.item() / i
UpperCAmelCase_ = self.scheduler_fn(state_step - 1 )
UpperCAmelCase_ = self.evaluate(__a , __a )
UpperCAmelCase_ = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(__a ) )
self.logger.log(__a , commit=__a )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f"""-e{epoch}-s{i}""" , state=__a )
def _lowercase (self : List[str] , __a : List[Any] , __a : Any ):
UpperCAmelCase_ = get_batched_dataset(__a , self.args.batch_size )
UpperCAmelCase_ = len(__a ) // self.args.batch_size
UpperCAmelCase_ = jnp.array(0 , dtype=jnp.floataa )
UpperCAmelCase_ = 0
for batch in tqdm(__a , total=__a , desc="Evaluating ... " ):
UpperCAmelCase_ = self.data_collator(__a )
UpperCAmelCase_ = self.val_step_fn(__a , **__a )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def _lowercase (self : Optional[int] , __a : List[str] , __a : int ):
UpperCAmelCase_ = jax_utils.unreplicate(__a )
print(f"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... " )
self.model_save_fn(__a , params=state.params )
with open(os.path.join(__a , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__a , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(__a , "data_collator.joblib" ) )
with open(os.path.join(__a , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , __a )
print("DONE" )
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any ) -> Optional[int]:
'''simple docstring'''
print(f"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " )
with open(os.path.join(snake_case_ , "flax_model.msgpack" ) , "rb" ) as f:
UpperCAmelCase_ = from_bytes(state.params , f.read() )
with open(os.path.join(snake_case_ , "opt_state.msgpack" ) , "rb" ) as f:
UpperCAmelCase_ = from_bytes(state.opt_state , f.read() )
UpperCAmelCase_ = joblib.load(os.path.join(snake_case_ , "args.joblib" ) )
UpperCAmelCase_ = joblib.load(os.path.join(snake_case_ , "data_collator.joblib" ) )
with open(os.path.join(snake_case_ , "training_state.json" ) , "r" ) as f:
UpperCAmelCase_ = json.load(snake_case_ )
UpperCAmelCase_ = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : str ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = num_train_steps - warmup_steps
UpperCAmelCase_ = optax.linear_schedule(init_value=snake_case_ , end_value=snake_case_ , transition_steps=snake_case_ )
UpperCAmelCase_ = optax.linear_schedule(init_value=snake_case_ , end_value=1E-7 , transition_steps=snake_case_ )
UpperCAmelCase_ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int ) -> Optional[Any]:
'''simple docstring'''
def weight_decay_mask(snake_case_ : Any ):
UpperCAmelCase_ = traverse_util.flatten_dict(snake_case_ )
UpperCAmelCase_ = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(snake_case_ )
UpperCAmelCase_ = scheduler_fn(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = optax.adamw(learning_rate=snake_case_ , weight_decay=snake_case_ , mask=snake_case_ )
return tx, lr
| 78 | '''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
    # note that when using BiT as the backbone for ViT-hybrid checkpoints,
    # one additionally needs to set config.layer_type = "bottleneck", config.stem_type = "same",
    # and config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , )
return config
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = get_config(snake_case_ )
# load original model from timm
UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(snake_case_ )
model.eval()
model.load_state_dict(snake_case_ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = BitImageProcessor(
do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 )
UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case_ , snake_case_ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(snake_case_ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(snake_case_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 78 | 1 |
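The Flax training script in the row above builds its learning-rate curve from two optax.linear_schedule segments joined at the warmup boundary, then feeds the joined schedule into optax.adamw. The sketch below isolates that construction; the concrete step counts and rates are illustrative assumptions.

import optax

init_lr, peak_lr = 0.0, 3e-5
warmup_steps, num_train_steps = 1_000, 20_000

# Ramp up linearly during warmup, then decay linearly toward ~0.
warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=peak_lr, transition_steps=warmup_steps)
decay_fn = optax.linear_schedule(init_value=peak_lr, end_value=1e-7, transition_steps=num_train_steps - warmup_steps)
lr_schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])

# The schedule maps a step count to a learning rate and can be passed to AdamW directly.
tx = optax.adamw(learning_rate=lr_schedule, weight_decay=0.0095)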
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: Any =get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : int = AlbertTokenizer
a__ : int = AlbertTokenizerFast
a__ : Union[str, Any] = True
a__ : Tuple = True
a__ : int = True
def _lowercase (self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self : List[Any] , __a : Tuple ):
UpperCAmelCase_ = "this is a test"
UpperCAmelCase_ = "this is a test"
return input_text, output_text
def _lowercase (self : str ):
UpperCAmelCase_ = "<pad>"
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _lowercase (self : int ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "▁eloquent" )
self.assertEqual(len(__a ) , 30000 )
def _lowercase (self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase (self : Tuple ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.tokenize(__a )
UpperCAmelCase_ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = AlbertTokenizer(__a , keep_accents=__a )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1289] )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = AlbertTokenizer(__a )
UpperCAmelCase_ = tokenizer.encode("sequence builders" )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase (self : Tuple ):
# fmt: off
UpperCAmelCase_ = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 78 | '''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
UpperCAmelCase_ = 0
def _lowercase (self : Tuple ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__a , __a )
def _lowercase (self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = CLIPConfig()
            # Create a dummy config file with image_processor_type
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
            # remove image_processor_type to make sure config.json alone is enough to load the image processor locally
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop("image_processor_type" )
UpperCAmelCase_ = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
            # make sure the private variable is not incorrectly saved
UpperCAmelCase_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def _lowercase (self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Tuple ):
with self.assertRaisesRegex(
__a , "clip-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" )
def _lowercase (self : Optional[int] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )
def _lowercase (self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase (self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _lowercase (self : Optional[int] ):
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : Optional[int] ):
class __A ( UpperCamelCase__ ):
a__ : str = True
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# If remote code is not set, the default is to use local
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 78 | 1 |
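The AutoImageProcessor tests in the row above exercise the registration hooks that let a custom configuration and image processor participate in the auto classes. A minimal hedged sketch of that registration flow follows; the class names are placeholders, and the image processor simply subclasses CLIPImageProcessor the way the test's custom class does.

from transformers import AutoConfig, AutoImageProcessor, CLIPImageProcessor, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # must match the name passed to AutoConfig.register

class MyImageProcessor(CLIPImageProcessor):
    pass

AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)

# After registration the auto classes resolve the custom pair like any built-in one:
#   processor = AutoImageProcessor.from_pretrained(path_to_saved_processor)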
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : Dict , __a : List[str]=-1 ):
        # in NER datasets, the last column is usually reserved for the NER label
UpperCAmelCase_ = label_idx
def _lowercase (self : Tuple , __a : Union[str, Any] , __a : Union[Split, str] ):
if isinstance(__a , __a ):
UpperCAmelCase_ = mode.value
UpperCAmelCase_ = os.path.join(__a , f"""{mode}.txt""" )
UpperCAmelCase_ = 1
UpperCAmelCase_ = []
with open(__a , encoding="utf-8" ) as f:
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
guid_index += 1
UpperCAmelCase_ = []
UpperCAmelCase_ = []
else:
UpperCAmelCase_ = line.split(" " )
words.append(splits[0] )
if len(__a ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
return examples
def _lowercase (self : int , __a : TextIO , __a : TextIO , __a : List ):
UpperCAmelCase_ = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(__a )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
UpperCAmelCase_ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(__a )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def _lowercase (self : Optional[Any] , __a : str ):
if path:
with open(__a , "r" ) as f:
UpperCAmelCase_ = f.read().splitlines()
if "O" not in labels:
UpperCAmelCase_ = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __A ( UpperCamelCase__ ):
def __init__(self : int ):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowercase (self : Optional[int] , __a : str ):
if path:
with open(__a , "r" ) as f:
UpperCAmelCase_ = f.read().splitlines()
if "O" not in labels:
UpperCAmelCase_ = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __A ( UpperCamelCase__ ):
def _lowercase (self : str , __a : List[str] , __a : Union[Split, str] ):
if isinstance(__a , __a ):
UpperCAmelCase_ = mode.value
UpperCAmelCase_ = os.path.join(__a , f"""{mode}.txt""" )
UpperCAmelCase_ = 1
UpperCAmelCase_ = []
with open(__a , encoding="utf-8" ) as f:
for sentence in parse_incr(__a ):
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(__a ) == len(__a )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
guid_index += 1
return examples
def _lowercase (self : Tuple , __a : TextIO , __a : TextIO , __a : List ):
UpperCAmelCase_ = 0
for sentence in parse_incr(__a ):
UpperCAmelCase_ = preds_list[example_id]
UpperCAmelCase_ = ""
for token in sentence:
out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(__a )
example_id += 1
def _lowercase (self : Optional[Any] , __a : str ):
if path:
with open(__a , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 78 | '''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False
@dataclass
class __A :
a__ : Optional[int] = None
a__ : bool = True
a__ : bool = True
a__ : Optional[str] = None
# Automatically constructed
a__ : ClassVar[str] = "dict"
a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__(self : Optional[Any] ):
return self.pa_type
def _lowercase (self : str , __a : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__a , __a ):
return {"bytes": None, "path": value}
elif isinstance(__a , __a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
                    # To convert raw PCM bytes to WAV bytes, the sampling rate must be known
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
                    # If we already have the PCM bytes, we don't need to read the file again (just use them!)
UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767
UpperCAmelCase_ = BytesIO(bytes() )
sf.write(__a , __a , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split("::" )[-1]
try:
UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(__a , "rb" , use_auth_token=__a ) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(__a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate )
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowercase (self : Dict ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase_ = storage.field("bytes" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase_ = storage.field("path" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__a , self.pa_type )
def _lowercase (self : Dict , __a : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__a : Tuple ):
with xopen(__a , "rb" ) as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
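The encode path of the Audio feature above converts an in-memory array to WAV bytes through a BytesIO buffer. A self-contained hedged sketch of that round trip is below; the sine-wave sample data is illustrative, and soundfile and numpy are assumed to be installed.

from io import BytesIO

import numpy as np
import soundfile as sf

sampling_rate = 16_000
# One second of a 440 Hz sine wave as illustrative audio data.
array = np.sin(2 * np.pi * 440 * np.arange(sampling_rate) / sampling_rate).astype(np.float32)

buffer = BytesIO()
sf.write(buffer, array, sampling_rate, format="wav")
encoded = {"bytes": buffer.getvalue(), "path": None}  # same shape as encode_example's output

# Decoding reverses it: read the bytes back into an array plus a sampling rate.
decoded_array, decoded_sr = sf.read(BytesIO(encoded["bytes"]))
assert decoded_sr == sampling_rate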
| 78 | 1 |
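The token-classification reader in the row above walks a CoNLL-style file line by line, starting a new example at blank lines or -DOCSTART- markers and falling back to the "O" label when a line carries no gold label. A self-contained hedged sketch of that parsing loop follows; the space-separated token/label layout matches the reader's assumptions.

from typing import List, Tuple

def read_conll(path: str, label_idx: int = -1) -> List[Tuple[List[str], List[str]]]:
    """Parse a CoNLL-style file into (words, labels) sentence pairs."""
    examples, words, labels = [], [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.startswith("-DOCSTART-") or line.strip() == "":
                if words:  # a blank line or document marker closes the sentence
                    examples.append((words, labels))
                    words, labels = [], []
            else:
                splits = line.split(" ")
                words.append(splits[0])
                # Test files may carry no gold label; fall back to "O".
                labels.append(splits[label_idx].rstrip("\n") if len(splits) > 1 else "O")
    if words:  # flush a trailing sentence without a final blank line
        examples.append((words, labels))
    return examples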