| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the lowercasing/accent options passed in.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 80
|
# Dependency table for the diffusers setup: package name -> pip requirement specifier.
_deps = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
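# Illustrative only (not part of the original file): a minimal sketch of how a pin table
# like `_deps` is typically consumed, resolving package names into requirement strings.
# The helper name `deps_list` is an assumption here.
def deps_list(*pkgs):
    """Return the pinned requirement string for each requested package."""
    return [_deps[pkg] for pkg in pkgs]


# e.g. deps_list("torch", "transformers") -> ["torch>=1.4", "transformers>=4.25.1"]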
| 190
| 0
|
"""simple docstring"""
from __future__ import annotations
__A : Dict = 10
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = 1
A = max(__lowerCAmelCase )
while placement <= max_digit:
# declare and initialize empty buckets
A = [[] for _ in range(__lowerCAmelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
A = int((i / placement) % RADIX )
buckets[tmp].append(__lowerCAmelCase )
# put each buckets' contents into list_of_ints
A = 0
for b in range(__lowerCAmelCase ):
for i in buckets[b]:
A = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
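# Illustrative usage (not part of the original module):
#
#     radix_sort([170, 45, 75, 90])  # -> [45, 75, 90, 170]
#
# Each pass costs O(n + RADIX), and there is one pass per digit of the maximum
# element, so the overall running time is O(d * (n + RADIX)) for d-digit keys.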
| 361
|
"""simple docstring"""
from __future__ import annotations
class __UpperCamelCase :
def __init__(self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str):
A , A = text, pattern
A , A = len(__SCREAMING_SNAKE_CASE), len(__SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str):
for i in range(self.patLen - 1 , -1 , -1):
if char == self.pattern[i]:
return i
return -1
def SCREAMING_SNAKE_CASE__ (self : str , __SCREAMING_SNAKE_CASE : int):
for i in range(self.patLen - 1 , -1 , -1):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def SCREAMING_SNAKE_CASE__ (self : List[Any]):
# searches pattern in text and returns index positions
A = []
for i in range(self.textLen - self.patLen + 1):
A = self.mismatch_in_text(__SCREAMING_SNAKE_CASE)
if mismatch_index == -1:
positions.append(__SCREAMING_SNAKE_CASE)
else:
A = self.match_in_pattern(self.text[mismatch_index])
A = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__A : int = 'ABAABA'
__A : Optional[Any] = 'AB'
__A : Any = BoyerMooreSearch(text, pattern)
__A : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
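# Illustrative note (not part of the original module): for text = 'ABAABA' and
# pattern = 'AB', the demo above prints:
#
#     Pattern found in following positions:
#     [0, 3]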
| 57
| 0
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 89
|
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = './model_checkpoints/vqgan_only.yaml'
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = './model_checkpoints/vqgan_only.pt'
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd['state_dict']
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit('.', 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError('Expected key `target` to instantiate.')
    return get_obj_from_str(config['target'])(**config.get('params', {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location='cpu')
        global_step = pl_sd['global_step']
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd['state_dict'], gpu=gpu, eval_mode=eval_mode)['model']
    return model, global_step
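# Illustrative usage (not part of the original module); the config/checkpoint paths are
# hypothetical and stand in for real VQGAN files:
#
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     vqgan = load_vqgan(device, conf_path="configs/vqgan.yaml", ckpt_path="ckpts/vqgan.ckpt")
#     xrec = reconstruct_with_vqgan(torch.rand(1, 3, 256, 256, device=device), vqgan)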
| 47
| 0
|
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    main()
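# Illustrative note (not part of the original script): each record in the raw DPR file is
# expected to look roughly like the following, so the script writes one question per line to
# the evaluation set and the tab-joined positive-context titles to the gold file:
#
#     {"question": "who sings ...", "positive_ctxs": [{"title": "...", "text": "..."}, ...]}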
| 355
|
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.'
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", 'w') as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", 'w') as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print('Making key files...')
    make_key_files('elgamal', 2048)
    print('Key files generation successful')


if __name__ == "__main__":
    main()
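# Illustrative note (not part of the original module): running `main()` produces two files.
# `elgamal_pubkey.txt` holds `key_size,e_1,e_2,p` and `elgamal_privkey.txt` holds
# `key_size,d`, matching the tuples returned by `generate_key(2048)`.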
| 280
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_a = 8
def __a ( __lowerCamelCase, __lowerCamelCase=BITS ):
UpperCAmelCase_ : Any = x.device
UpperCAmelCase_ : str = (x * 255).int().clamp(0, 255 )
UpperCAmelCase_ : Tuple = 2 ** torch.arange(bits - 1, -1, -1, device=_a )
UpperCAmelCase_ : Tuple = rearrange(_a, "d -> d 1 1" )
UpperCAmelCase_ : Any = rearrange(_a, "b c h w -> b c 1 h w" )
UpperCAmelCase_ : List[str] = ((x & mask) != 0).float()
UpperCAmelCase_ : Optional[int] = rearrange(_a, "b c d h w -> b (c d) h w" )
UpperCAmelCase_ : Union[str, Any] = bits * 2 - 1
return bits
def __a ( __lowerCamelCase, __lowerCamelCase=BITS ):
UpperCAmelCase_ : str = x.device
UpperCAmelCase_ : Any = (x > 0).int()
UpperCAmelCase_ : Any = 2 ** torch.arange(bits - 1, -1, -1, device=_a, dtype=torch.intaa )
UpperCAmelCase_ : List[str] = rearrange(_a, "d -> d 1 1" )
UpperCAmelCase_ : Dict = rearrange(_a, "b (c d) h w -> b c d h w", d=8 )
UpperCAmelCase_ : Optional[Any] = reduce(x * mask, "b c d h w -> b c h w", "sum" )
return (dec / 255).clamp(0.0, 1.0 )
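# Illustrative sanity check (not part of the original pipeline): the two helpers above are
# inverses up to the 8-bit quantization applied by `decimal_to_bits`.
def _bit_roundtrip_demo():
    img = torch.rand(1, 3, 4, 4)  # image tensor in [0, 1], layout b c h w
    bit_repr = decimal_to_bits(img)  # shape (1, 3 * 8, 4, 4), values in {-1, +1}
    recon = bits_to_decimal(bit_repr)  # back to [0, 1]
    assert (img - recon).abs().max() <= 1 / 255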
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # route scheduler steps through the bit-aware variants defined above
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
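# Illustrative usage (not part of the original pipeline file); the checkpoint path is
# hypothetical and stands in for a UNet trained on the bit representation:
#
#     unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")
#     pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#     image = pipe(height=256, width=256, num_inference_steps=50).images[0]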
| 61
|
"""simple docstring"""
import numpy as np
def __lowercase ( _a ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
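# Illustrative values (not part of the original module):
#
#     tangent_hyperbolic(np.array([-1.0, 0.0, 1.0]))  # -> approx. [-0.7616, 0.0, 0.7616]
#
# which matches np.tanh, since tanh(x) = (2 / (1 + e^(-2x))) - 1.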
| 264
| 0
|
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for a dummy object matching `name`: a constant, function, or class."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check that the dummy files are up to date; optionally overwrite them with fresh content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
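# Illustrative note (not part of the original script): for a class name such as
# "AltCLIPModel" guarded by the torch backend, `create_dummy_object` renders the
# DUMMY_CLASS template into roughly:
#
#     class AltCLIPModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])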
| 328
|
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
| 328
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
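# Illustrative usage (not part of the original module): instantiating the config with its
# defaults gives a BERT-base-sized encoder plus the XPath embedding tables.
#
#     config = MarkupLMConfig()
#     assert config.hidden_size == 768 and config.max_depth == 50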
| 141
|
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis
         and Rinott, Ruty
         and Lample, Guillaume
         and Williams, Adina
         and Bowman, Samuel R.
         and Schwenk, Holger
         and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods
               in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 141
| 1
|
import unittest

from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)


@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 301
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so bypass __setattr__ to swap in the aligned schema
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
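# Illustrative usage (not part of the original module): aligning the template with a
# dataset's features swaps in that dataset's concrete Audio feature (e.g. its sampling rate).
#
#     template = AutomaticSpeechRecognition()
#     features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#     template = template.align_with_features(features)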
| 301
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 76
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Optional[int] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
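# Illustrative note (not part of the original module): at runtime the module object is
# replaced by the `_LazyModule`, so `from .modeling_altclip import AltCLIPModel` only
# executes when an attribute such as `AltCLIPModel` is first accessed, while static type
# checkers still see the real imports through the `TYPE_CHECKING` branch above.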
| 57
| 0
|
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """
    Converts a 3d point to a 2d drawable point.

    >>> convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
    (7.6923076923076925, 15.384615384615385)
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a point around the given axis ('x', 'y' or 'z') by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 351
|
def bin_to_octal(bin_string: str) -> str:
    """
    Convert a binary value to its octal equivalent.

    >>> bin_to_octal("1111")
    '17'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 17
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 280
|
def find_minimum_change(denominations: list, value: str) -> list:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
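# Illustrative greedy trace (not part of the original module):
#
#     find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#     # -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
#
# Greedy selection is optimal here because this denomination system is canonical.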
| 280
| 1
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Loads an ONNX inference session with a given provider; defaults to CPUExecutionProvider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = str(model_id).split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
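# Minimal usage sketch (illustrative, not part of the original file): assumes a
# local directory `./model_dir` containing a previously exported `model.onnx`;
# the keyword names passed at call time must match the exported graph's inputs.
#
#     model = OnnxRuntimeModel.from_pretrained("./model_dir", provider="CPUExecutionProvider")
#     outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))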
| 353
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
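# Example (hypothetical environment): with LOCAL_RANK=2 exported,
# `get_int_from_env(["LOCAL_RANK", "MPI_LOCALRANKID"], 0)` returns 2, and
# `parse_flag_from_env("SOME_DEBUG_FLAG")` returns False unless the variable is
# set to a truthy string that `strtobool` accepts, such as "1", "true" or "yes".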
| 296
| 0
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowercase__ : Any = "src/diffusers"
# Matches is_xxx_available()
lowercase__ : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowercase__ : Dict = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowercase__ : str = "\n{0} = None\n"
lowercase__ : str = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowercase__ : int = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowercase__ : Union[str, Any] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
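# For illustration: `create_dummy_object("UNet2DModel", '["torch"]')` renders the
# DUMMY_CLASS template above, yielding a placeholder class whose constructor
# calls `requires_backends(self, ["torch"])` and therefore raises a clear error
# when the torch backend is not installed.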
| 328
|
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
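# Worked example: solution(10) -> 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.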
| 328
| 1
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 215
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    """Wraps a Whisper feature extractor and a Whisper tokenizer into a single processor."""

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
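# Minimal usage sketch (illustrative; assumes the public "openai/whisper-tiny"
# checkpoint and a 16 kHz mono waveform `speech` as a 1-D float array):
#
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")
#     # inputs.input_features holds the log-mel spectrogram fed to the model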
| 215
| 1
|
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is a path from source `s` to sink `t` in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Returns the saturated edges of the final residual graph, i.e. the minimum cut."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
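# On the sample graph above (the classic CLRS max-flow network, whose maximum
# flow is 23) the saturated forward edges reported by `mincut` should correspond
# to the cut separating {0, 1, 2, 4} from {3, 5}; the expected output, up to
# ordering, is [(1, 3), (4, 3), (4, 5)].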
| 301
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    scores, indices = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in indices[0]]
    return nn_examples
def lowercase (_lowerCAmelCase , _lowerCAmelCase="wiki40b" , _lowerCAmelCase="dense" , _lowerCAmelCase=10 ):
if source == "none":
__lowerCAmelCase , __lowerCAmelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__lowerCAmelCase , __lowerCAmelCase = query_qa_dense_index(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase , __lowerCAmelCase = query_es_index(
_lowerCAmelCase , _lowerCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCAmelCase , )
__lowerCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__lowerCAmelCase = """question: {} context: {}""".format(_lowerCAmelCase , _lowerCAmelCase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # note: `support_list` is read from the module-level variable set in the main flow below
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options
    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options
    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_dense, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            question_sparse, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE_ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301
| 1
|
"""simple docstring"""
import copy
import os
from typing import List, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
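# Minimal usage sketch (illustrative): build a composite config from sub-configs
# and round-trip it through to_dict().
#
#     text_config = AlignTextConfig(vocab_size=30522, hidden_size=768)
#     vision_config = AlignVisionConfig(image_size=600)
#     config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
#     config_dict = config.to_dict()  # nested text/vision configs serialized as dicts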
| 356
|
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic (Miller-Rabin style) primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 230
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 315
|
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Returns the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sums all amicable numbers below the limit (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
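# Worked example: 220 and 284 form the smallest amicable pair, since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220; with the default
# limit of 10000 the solution evaluates to 31626.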
| 17
| 0
|
def z_function(input_str: str) -> list[int]:
    """For each index, the length of the longest substring starting there that is also a prefix."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check if we can keep extending the current match at position i."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Counts occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
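# Worked example: z_function("aaaa") returns [0, 3, 2, 1] (index 0 is left at 0
# by convention here), and find_pattern("a", "aa") returns 2 because the
# concatenated string "aaa" has two positions with a Z-value >= len("a").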
| 369
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def _lowercase (self : int , __a : Optional[Any] , __a : Optional[int]="\n" ):
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__a ) > 0:
words.append(bytearray(__a ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(__a )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(__a )
if len(__a ) > 0:
words.append(bytearray(__a ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(__a )
return text
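# Minimal usage sketch (illustrative; assumes the public
# "abeja/gpt-neox-japanese-2.7b" checkpoint is reachable):
#
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("こんにちは")["input_ids"]
#     text = tokenizer.decode(ids)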
| 106
| 0
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :Optional[Any] = torch.tensor([1, 2, 3] )
lowercase :Union[str, Any] = torch.tensor([2, 3, 4] )
lowercase :List[str] = DummyModel()
lowercase :Dict = torch.optim.Adam(net.parameters() )
lowercase :str = Accelerator()
with self.assertRaises(lowerCamelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowercase :Any = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase :Optional[Any] = DummyModel()
lowercase :List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase :List[str] = torch.optim.lr_scheduler.StepLR(lowerCamelCase__ , step_size=1 , gamma=0.99 )
lowercase , lowercase :Union[str, Any] = dummy_dataloaders()
lowercase :int = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
lowercase :Any = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
lowercase , lowercase , lowercase , lowercase , lowercase :Tuple = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
lowercase :List[str] = scheduler.state_dict()
train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(lowerCamelCase__ , scheduler.state_dict() )
def SCREAMING_SNAKE_CASE ( self: Dict ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase :Optional[Any] = DummyModel()
lowercase :List[str] = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ , total_limit=2 )
# Train baseline
lowercase :Any = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
lowercase :str = accelerator.prepare(lowerCamelCase__ )
# Save 11 states; with total_limit=2, only the two most recent checkpoints should remain:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :Optional[int] = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = "/tmp/accelerate/state_checkpointing"
_UpperCAmelCase : List[str] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_UpperCAmelCase : Tuple = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_UpperCAmelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_UpperCAmelCase : Optional[int] = group["params"][0].device
break
assert param_device.type == accelerator.device.type
_UpperCAmelCase : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
_UpperCAmelCase : List[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
_UpperCAmelCase : Dict = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
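# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the test above).
# A minimal save/load round trip with the public `accelerate` checkpointing
# API the script exercises; the save directory is a placeholder.
import torch
from accelerate import Accelerator

def checkpoint_roundtrip_sketch(model: torch.nn.Module, save_dir: str = "./ckpt"):
    accelerator = Accelerator()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state(save_dir)   # writes model, optimizer and RNG states
    accelerator.load_state(save_dir)   # restores them in place
    return model, optimizer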
| 236
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNet2DModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ,return_dict=lowerCamelCase__ )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256"""
SCREAMING_SNAKE_CASE = UNet2DModel.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
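# ---------------------------------------------------------------------------
# Hedged usage sketch. Minimal unconditional sampling with the pipeline the
# tests above exercise; the checkpoint id mirrors the slow test and the step
# count is illustrative. Assumes a diffusers version that still ships
# KarrasVePipeline.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

def karras_ve_sample(model_id: str = "google/ncsnpp-celebahq-256"):
    unet = UNet2DModel.from_pretrained(model_id)
    pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
    generator = torch.manual_seed(0)
    return pipe(num_inference_steps=20, generator=generator, output_type="numpy").images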
| 296
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fast27_timesteps,
smart27_timesteps,
smart50_timesteps,
smart100_timesteps,
smart185_timesteps,
super27_timesteps,
super40_timesteps,
super100_timesteps,
)
@dataclass
class IFPipelineOutput( BaseOutput ):  # name and fields restored from the upstream IF pipeline output
images: Union[List[PIL.Image.Image], np.ndarray]
nsfw_detected: Optional[List[bool]]
watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
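# ---------------------------------------------------------------------------
# Hedged usage sketch. The conditional exports above make the DeepFloyd IF
# pipelines importable from `diffusers`; the checkpoint id and prompt are
# illustrative, and the stage-II upscaling step is omitted.
from diffusers import IFPipeline

def if_stage1_sketch(prompt: str = "a photo of a corgi"):
    pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
    return pipe(prompt=prompt, num_inference_steps=50).images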
| 368
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284
| 0
|
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = "utf-8"
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = True # deprecated
UpperCAmelCase = None # deprecated
UpperCAmelCase = 10 << 20 # 10MB
UpperCAmelCase = None
class lowercase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase = JsonConfig
def _snake_case ( self ) -> Optional[Any]:
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def _snake_case ( self ,a_ ) -> List[Any]:
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_UpperCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ ,(str, list, tuple) ):
_UpperCAmelCase : List[Any] = data_files
if isinstance(a_ ,a_ ):
_UpperCAmelCase : Any = [files]
_UpperCAmelCase : str = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""files""": files} )]
_UpperCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a_ ,a_ ):
_UpperCAmelCase : Union[str, Any] = [files]
_UpperCAmelCase : List[str] = [dl_manager.iter_files(a_ ) for file in files]
splits.append(datasets.SplitGenerator(name=a_ ,gen_kwargs={"""files""": files} ) )
return splits
def _snake_case ( self ,a_ ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
_UpperCAmelCase : Tuple = self.config.features.arrow_schema.field(a_ ).type
_UpperCAmelCase : Dict = pa_table.append_column(a_ ,pa.array([None] * len(a_ ) ,type=a_ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
_UpperCAmelCase : List[str] = table_cast(a_ ,self.config.features.arrow_schema )
return pa_table
def _snake_case ( self ,a_ ) -> Union[str, Any]:
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(a_ ,encoding=self.config.encoding ,errors=self.config.encoding_errors ) as f:
_UpperCAmelCase : Any = json.load(a_ )
# We keep only the field we are interested in
_UpperCAmelCase : Any = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(a_ ,(list, tuple) ):
_UpperCAmelCase : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
_UpperCAmelCase : Dict = {col: [row.get(a_ ) for row in dataset] for col in keys}
else:
_UpperCAmelCase : List[Any] = dataset
_UpperCAmelCase : Tuple = pa.Table.from_pydict(a_ )
yield file_idx, self._cast_table(a_ )
# If the file has one json object per line
else:
with open(a_ ,"""rb""" ) as f:
_UpperCAmelCase : Optional[Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
_UpperCAmelCase : List[Any] = max(self.config.chunksize // 32 ,16 << 10 )
_UpperCAmelCase : Dict = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
_UpperCAmelCase : int = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(a_ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
_UpperCAmelCase : int = batch.decode(self.config.encoding ,errors=a_ ).encode("""utf-8""" )
try:
while True:
try:
_UpperCAmelCase : Optional[int] = paj.read_json(
io.BytesIO(a_ ) ,read_options=paj.ReadOptions(block_size=a_ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(a_ ,pa.ArrowInvalid )
and "straddling" not in str(a_ )
or block_size > len(a_ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'''Batch of {len(a_ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
a_ ,encoding=self.config.encoding ,errors=self.config.encoding_errors ) as f:
_UpperCAmelCase : List[Any] = json.load(a_ )
except json.JSONDecodeError:
logger.error(f'''Failed to read file \'{file}\' with error {type(a_ )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(a_ ,a_ ): # list is the only sequence type supported in JSON
try:
_UpperCAmelCase : Any = set().union(*[row.keys() for row in dataset] )
_UpperCAmelCase : int = {col: [row.get(a_ ) for row in dataset] for col in keys}
_UpperCAmelCase : Any = pa.Table.from_pydict(a_ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(a_ )}: {e}''' )
raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(a_ )
break
else:
logger.error(f'''Failed to read file \'{file}\' with error {type(a_ )}: {e}''' )
raise ValueError(
f'''Not able to read records in the JSON file at {file}. '''
f'''You should probably indicate the field of the JSON file containing your records. '''
f'''This JSON file contains the following fields: {str(list(dataset.keys() ) )}. '''
f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(a_ )
batch_idx += 1
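# ---------------------------------------------------------------------------
# Hedged usage sketch. This builder backs `load_dataset("json", ...)`; the
# file paths are placeholders. `field` and `chunksize` map onto the config
# attributes handled in `_generate_tables` above.
from datasets import load_dataset

def load_json_sketch():
    # JSON Lines input, read in `chunksize`-byte batches:
    lines_ds = load_dataset("json", data_files="data.jsonl", chunksize=10 << 20)
    # Single JSON document whose records live under a top-level key:
    nested_ds = load_dataset("json", data_files="data.json", field="records")
    return lines_ds, nested_ds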
| 215
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A_ : Any = logging.get_logger(__name__)
A_ : Optional[int] = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class lowercase ( PretrainedConfig ):
"""simple docstring"""
UpperCAmelCase = """longformer"""
def __init__( self ,a_ = 512 ,a_ = 2 ,a_ = 1 ,a_ = 0 ,a_ = 2 ,a_ = 30_522 ,a_ = 768 ,a_ = 12 ,a_ = 12 ,a_ = 3_072 ,a_ = "gelu" ,a_ = 0.1 ,a_ = 0.1 ,a_ = 512 ,a_ = 2 ,a_ = 0.02 ,a_ = 1E-1_2 ,a_ = False ,**a_ ,) -> List[Any]:
super().__init__(pad_token_id=a_ ,**a_ )
_UpperCAmelCase : List[Any] = attention_window
_UpperCAmelCase : Any = sep_token_id
_UpperCAmelCase : Dict = bos_token_id
_UpperCAmelCase : Tuple = eos_token_id
_UpperCAmelCase : Tuple = vocab_size
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCAmelCase : List[str] = max_position_embeddings
_UpperCAmelCase : Optional[int] = type_vocab_size
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : Union[str, Any] = onnx_export
class lowercase ( OnnxConfig ):
"""simple docstring"""
def __init__( self ,a_ ,a_ = "default" ,a_ = None ) -> int:
super().__init__(a_ ,a_ ,a_ )
_UpperCAmelCase : Tuple = True
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
outputs = super().outputs
if self.task == "default":
outputs["""pooler_output"""] = {0: """batch"""}  # output key assumed from the upstream Longformer ONNX config
return outputs
@property
def _snake_case ( self ) -> float:
return 1E-4
@property
def _snake_case ( self ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset ,14 )
def _snake_case ( self ,a_ ,a_ = -1 ,a_ = -1 ,a_ = False ,a_ = None ,) -> Mapping[str, Any]:
inputs = super().generate_dummy_inputs(
preprocessor=a_ ,batch_size=a_ ,seq_length=a_ ,is_pair=a_ ,framework=a_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
inputs["""global_attention_mask"""] = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
inputs["""global_attention_mask"""][:, ::2] = 1
return inputs
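# ---------------------------------------------------------------------------
# Hedged usage sketch. Builds the configuration defined above; it assumes the
# ONNX config class is importable under its upstream name `LongformerOnnxConfig`,
# and the attention window is illustrative.
from transformers import LongformerConfig
from transformers.models.longformer.configuration_longformer import LongformerOnnxConfig

def longformer_onnx_sketch():
    config = LongformerConfig(attention_window=256, onnx_export=True)
    onnx_config = LongformerOnnxConfig(config, task="default")
    return onnx_config.outputs  # ONNX output axes for the default task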
| 215
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _lowercase ( PretrainedConfig ):
lowercase = 'xlm-roberta-xl'
def __init__( self : List[str] , snake_case : Optional[int]=2_5_0_8_8_0 , snake_case : List[str]=2_5_6_0 , snake_case : Optional[Any]=3_6 , snake_case : Union[str, Any]=3_2 , snake_case : Optional[Any]=1_0_2_4_0 , snake_case : Any="gelu" , snake_case : Optional[Any]=0.1 , snake_case : Any=0.1 , snake_case : Any=5_1_4 , snake_case : List[Any]=1 , snake_case : int=0.02 , snake_case : List[str]=1e-05 , snake_case : List[str]=1 , snake_case : List[Any]=0 , snake_case : List[str]=2 , snake_case : int="absolute" , snake_case : str=True , snake_case : Dict=None , **snake_case : List[Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
UpperCamelCase_ : Any = vocab_size
UpperCamelCase_ : str = hidden_size
UpperCamelCase_ : Union[str, Any] = num_hidden_layers
UpperCamelCase_ : Optional[Any] = num_attention_heads
UpperCamelCase_ : Union[str, Any] = hidden_act
UpperCamelCase_ : Any = intermediate_size
UpperCamelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCamelCase_ : List[str] = attention_probs_dropout_prob
UpperCamelCase_ : List[str] = max_position_embeddings
UpperCamelCase_ : Union[str, Any] = type_vocab_size
UpperCamelCase_ : List[str] = initializer_range
UpperCamelCase_ : Tuple = layer_norm_eps
UpperCamelCase_ : List[Any] = position_embedding_type
UpperCamelCase_ : Optional[Any] = use_cache
UpperCamelCase_ : List[Any] = classifier_dropout
class _lowercase ( OnnxConfig ):
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 50
|
def is_isogram( string : str ) -> bool:
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
letters = sorted(string.lower() )
return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
a_ = input('Enter a string ').strip()
a_ = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 50
| 1
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__UpperCAmelCase =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class a__ ( Pipeline ):
def __init__( self : str , **a : List[Any] ):
"""simple docstring"""
super().__init__(**a )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict , a : Union[str, List[str], "Image", List["Image"]] , **a : Dict ):
"""simple docstring"""
return super().__call__(a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , **a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = {}
if "candidate_labels" in kwargs:
__lowerCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__lowerCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : str , a : Dict=None , a : Any="This is a photo of {}." ):
"""simple docstring"""
__lowerCamelCase = load_image(a )
__lowerCamelCase = self.image_processor(images=[image] , return_tensors=self.framework )
__lowerCamelCase = candidate_labels
__lowerCamelCase = [hypothesis_template.format(a ) for x in candidate_labels]
__lowerCamelCase = self.tokenizer(a , return_tensors=self.framework , padding=a )
__lowerCamelCase = [text_inputs]
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = model_inputs.pop('''candidate_labels''' )
__lowerCamelCase = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , a ):
__lowerCamelCase = text_inputs[0]
else:
# Batching case.
__lowerCamelCase = text_inputs[0][0]
__lowerCamelCase = self.model(**a , **a )
__lowerCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def SCREAMING_SNAKE_CASE__ ( self : Dict , a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = model_outputs.pop('''candidate_labels''' )
__lowerCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
__lowerCamelCase = logits.softmax(dim=-1 ).squeeze(-1 )
__lowerCamelCase = probs.tolist()
if not isinstance(a , a ):
__lowerCamelCase = [scores]
elif self.framework == "tf":
__lowerCamelCase = stable_softmax(a , axis=-1 )
__lowerCamelCase = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__lowerCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(a , a ) , key=lambda a : -a[0] )
]
return result
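# ---------------------------------------------------------------------------
# Hedged usage sketch. The preprocess/_forward/postprocess hooks above are
# normally driven through the high-level `pipeline` factory; the model id,
# image path and labels are illustrative.
from transformers import pipeline

def zero_shot_image_sketch(image_path: str = "cat.png"):
    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    return classifier(image_path, candidate_labels=["cat", "dog", "bird"])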
| 67
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__ = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 230
| 0
|
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def a ( self : int ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase__ = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def a ( self : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase__ = self.dummy_uncond_unet
lowerCAmelCase__ = PNDMScheduler()
lowerCAmelCase__ = PNDMPipeline(unet=_a , scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pndm(generator=_a , num_inference_steps=20 , output_type="numpy" ).images
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pndm(generator=_a , num_inference_steps=20 , output_type="numpy" , return_dict=_a )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def a ( self : Union[str, Any] ) -> List[Any]:
lowerCAmelCase__ = "google/ddpm-cifar10-32"
lowerCAmelCase__ = UNet2DModel.from_pretrained(_a )
lowerCAmelCase__ = PNDMScheduler()
lowerCAmelCase__ = PNDMPipeline(unet=_a , scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pndm(generator=_a , output_type="numpy" ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 350
|
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict ) -> Optional[int]:
super().__init__()
lowerCAmelCase__ = nn.Linear(3 , 4 )
lowerCAmelCase__ = nn.BatchNorm1d(4 )
lowerCAmelCase__ = nn.Linear(4 , 5 )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ) -> List[Any]:
return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE__ ) ) )
class __lowerCamelCase ( ModelHook ):
"""simple docstring"""
def a ( self : Any , SCREAMING_SNAKE_CASE__ : str , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]:
return (args[0] + 1,) + args[1:], kwargs
class __lowerCamelCase ( ModelHook ):
"""simple docstring"""
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> Dict:
return output + 1
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def a ( self : List[str] ) -> Tuple:
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(test_model._hf_hook , SCREAMING_SNAKE_CASE__ )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(SCREAMING_SNAKE_CASE__ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , "_hf_hook" ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , "_old_forward" ) )
def a ( self : Union[str, Any] ) -> int:
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , append=SCREAMING_SNAKE_CASE__ )
self.assertEqual(isinstance(test_model._hf_hook , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(SCREAMING_SNAKE_CASE__ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , "_hf_hook" ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , "_old_forward" ) )
def a ( self : List[str] ) -> Any:
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = test_model(x + 1 )
lowerCAmelCase__ = test_model(x + 2 )
lowerCAmelCase__ = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
lowerCAmelCase__ = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCAmelCase__ = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 )
def a ( self : Any ) -> Union[str, Any]:
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
lowerCAmelCase__ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCAmelCase__ = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , output + 2 , atol=1e-5 )
def a ( self : Optional[int] ) -> int:
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCAmelCase__ = True
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(SCREAMING_SNAKE_CASE__ , AlignDevicesHook(io_same_device=SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = torch.randn(2 , 3 ).to(0 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , torch.device(0 ) )
def a ( self : List[str] ) -> List[str]:
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule to a different device
lowerCAmelCase__ = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
lowerCAmelCase__ = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a ( self : Optional[int] ) -> Union[str, Any]:
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule to a different device
lowerCAmelCase__ = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ = torch.device(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , offload_buffers=SCREAMING_SNAKE_CASE__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a ( self : Optional[Any] ) -> str:
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule to a different device
lowerCAmelCase__ = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ = torch.device(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , weights_map=model.state_dict() , offload_buffers=SCREAMING_SNAKE_CASE__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
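# ---------------------------------------------------------------------------
# Hedged usage sketch. The core pattern the tests above exercise: attach a
# device-alignment hook to a module, run a forward pass, then remove the hook
# to restore the original `forward`. The execution device is illustrative.
import torch
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module

def hook_sketch():
    layer = nn.Linear(3, 4)
    add_hook_to_module(layer, AlignDevicesHook(execution_device="cpu", io_same_device=True))
    out = layer(torch.randn(2, 3))  # inputs and outputs aligned by the hook
    remove_hook_from_module(layer)
    return out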
| 221
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__a = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( BaseTransformer ):
A : Optional[Any] = 'sequence-classification'
def __init__( self , SCREAMING_SNAKE_CASE__ ):
if type(lowercase_ ) == dict:
lowercase : int = Namespace(**lowercase_ )
lowercase : int = glue_output_modes[hparams.task]
lowercase : List[Any] = glue_tasks_num_labels[hparams.task]
super().__init__(lowercase_ , lowercase_ , self.mode )
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
return self.model(**lowercase_ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase : Optional[Any] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowercase : Optional[int] = self(**lowercase_ )
lowercase : Union[str, Any] = outputs[0]
lowercase : Union[str, Any] = self.trainer.lr_schedulers[0]['''scheduler''']
lowercase : Union[str, Any] = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __lowerCamelCase ( self ):
lowercase : Tuple = self.hparams
lowercase : int = processors[args.task]()
lowercase : int = processor.get_labels()
for mode in ["train", "dev"]:
lowercase : int = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , lowercase_ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
lowercase : int = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
lowercase : Optional[int] = convert_examples_to_features(
lowercase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , lowercase_ )
torch.save(lowercase_ , lowercase_ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
lowercase : Tuple = '''dev''' if mode == '''test''' else mode
lowercase : Tuple = self._feature_file(lowercase_ )
logger.info('''Loading features from cached file %s''' , lowercase_ )
lowercase : Union[str, Any] = torch.load(lowercase_ )
lowercase : List[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase : Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowercase : Optional[int] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowercase : Optional[Any] = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowercase : List[Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ , shuffle=lowercase_ , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase : int = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowercase : int = self(**lowercase_ )
lowercase : str = outputs[:2]
lowercase : Optional[Any] = logits.detach().cpu().numpy()
lowercase : Union[str, Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : int = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
lowercase : Any = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowercase : Any = np.argmax(lowercase_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowercase : Optional[Any] = np.squeeze(lowercase_ )
lowercase : Union[str, Any] = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
lowercase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
lowercase : str = [[] for _ in range(out_label_ids.shape[0] )]
lowercase : Any = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase_ , lowercase_ )}
lowercase : Optional[int] = dict(results.items() )
lowercase : Any = results
return ret, preds_list, out_label_list
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = self._eval_end(lowercase_ )
lowercase : str = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = self._eval_end(lowercase_ )
lowercase : List[str] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=lowercase_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=lowercase_ , required=lowercase_ , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=lowercase_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def __lowercase ( ) ->Optional[Any]:
"""simple docstring"""
lowercase : str = argparse.ArgumentParser()
add_generic_args(A_, os.getcwd() )
lowercase : int = GLUETransformer.add_model_specific_args(A_, os.getcwd() )
lowercase : Tuple = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowercase : Tuple = os.path.join(
'''./results''', f"""{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}""", )
os.makedirs(args.output_dir )
lowercase : Tuple = GLUETransformer(A_ )
lowercase : Tuple = generic_train(A_, A_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowercase : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt''' ), recursive=A_ ) )
lowercase : Dict = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(A_ )
if __name__ == "__main__":
main()
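# ---------------------------------------------------------------------------
# Hedged usage sketch. An illustrative invocation of the script above; the
# task-specific flags (`--task`, `--gpus`, `--max_seq_length`) come from the
# argparse setup shown, while the remaining flags are assumed to be supplied
# by `add_generic_args` in `lightning_base`. Paths and model id are placeholders.
#
#   python run_pl_glue.py \
#       --task mrpc \
#       --model_name_or_path bert-base-cased \
#       --data_dir ./glue_data/MRPC \
#       --output_dir ./mrpc_out \
#       --max_seq_length 128 \
#       --gpus 1 \
#       --do_train --do_predict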
| 337
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected (height, width) after the shortest-edge resizing logic is applied."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`." )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`." )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function." )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic." ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.0.1", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
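# Usage sketch (hedged, not part of the module): the launched function must build its own
# Accelerator internally, and the args tuple is forwarded to it in every spawned process.
#
#   def training_loop(batch_size):
#       ...  # create the Accelerator, model and dataloaders here
#
#   notebook_launcher(training_loop, args=(16,), num_processes=2)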
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A list that orders itself by its top (last) element, so stacks can be bisected."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sorts the collection in place using patience sort and returns it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
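# Example (hedged): entering "5,1,4,2,3" above prints [1, 2, 3, 4, 5]; elements are dealt
# onto ordered stacks via bisect_left and the stacks are then combined with heapq.merge.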
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
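# Note on the lazy-module pattern above (hedged): outside of type checking, attribute access on this
# package is routed through `_LazyModule`, so e.g. `XCLIPModel` only triggers the heavy torch
# modeling import the first time it is actually used.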
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected', [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i, i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0, 4 ), range(4, 7 ), range(7, 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0, 1 ), range(1, 2 ), range(2, 3 )]),
], )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected', [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
], )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected', [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
], )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
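# Usage sketch (hedged; assumes Hub access and a valid checkpoint name):
#   tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   tokenizer("Hello world")["input_ids"]  # XLNet appends <sep> and <cls> at the end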
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs )

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
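# Usage sketch for the processor above (hedged; `image` stands for any PIL image you provide):
#   processor = ConvNextImageProcessor(size={"shortest_edge": 384})
#   pixel_values = processor(images=image, return_tensors="pt")["pixel_values"]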
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0], )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, )

    def forward(
        self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True, ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample)
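# Usage sketch (hedged): a working config must size `extra_in_channels` to the time embedding
# that the NoSkip blocks concatenate onto the sample (e.g. Dance Diffusion pairs in_channels=2
# with extra_in_channels=16 for the 16-channel Fourier embedding above):
#   model = UNet1DModel(in_channels=2, out_channels=2, extra_in_channels=16)
#   out = model(torch.randn(1, 2, 65536), timestep=10).sample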
def is_sum_subset(arr, required_sum) -> bool:
    """Returns True if some subset of `arr` sums to `required_sum` (bottom-up DP)."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
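# Quick sanity checks (hedged examples):
#   is_sum_subset([3, 34, 4, 12, 5, 2], 9)   # -> True, since 4 + 5 == 9
#   is_sum_subset([3, 34, 4, 12, 5, 2], 30)  # -> False, no subset sums to 30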
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
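# Worked example (hedged, plain SI units): for L = 10 H and C = 5 F,
#   resonant_frequency(10, 5)  # -> ('Resonant frequency', ~0.0225), i.e. 1 / (2*pi*sqrt(50)) Hz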
"""simple docstring"""
import argparse
import os
import re
import packaging.version
UpperCAmelCase_ : Any = """examples/"""
UpperCAmelCase_ : Optional[int] = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCAmelCase_ : List[Any] = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCAmelCase_ : Optional[int] = """README.md"""
def update_version_in_file(fname, version, pattern):
    """Update the version of transformers in one file, using the matching replace pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc", )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = f'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(f'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(f'Updating version to {version}.')
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(f'Updating version to {version}.')
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
UpperCAmelCase_ : int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
"""simple docstring"""
def _A ( UpperCamelCase_ : Optional[int]) -> int:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _A ( UpperCamelCase_ : dict[int, list[int]]) -> list[tuple[int, int]]:
'''simple docstring'''
__lowercase = 0
__lowercase = len(UpperCamelCase_) # No of vertices in graph
__lowercase = [0] * n
__lowercase = [False] * n
def dfs(UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Optional[int], UpperCamelCase_ : Any, UpperCamelCase_ : Optional[Any]):
__lowercase = True
__lowercase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, id_)
__lowercase = min(low[at], low[to])
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at))
else:
# This edge is a back edge and cannot be a bridge
__lowercase = min(low[at], low[to])
__lowercase = []
for i in range(UpperCamelCase_):
if not visited[i]:
dfs(UpperCamelCase_, -1, UpperCamelCase_, id_)
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
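# Example (hedged): on the demo graph at index 0, the bridges found are
#   compute_bridges(get_demo_graph(0))  # -> [(3, 4), (2, 3), (2, 5)] in DFS return order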
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f'''{int(hours)}h{int(minutes)}m{int(seconds)}s'''
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
    @property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f'''*{category} failures*:'''.ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
    @property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
    @staticmethod
    def error_out() -> None:
        payload = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
        print('Sending the following payload')
        print(json.dumps({'blocks': payload}))
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text='There was an issue running the tests.', blocks=payload, )
    def post(self):
        print('Sending the following payload')
        print(json.dumps({'blocks': json.loads(self.payload)}))
        text = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value) > 250 else value
            failures_text += f'''*{key}*\n_{value}_\n\n'''
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.')
        job_link = self.doc_test_results.pop('job_link')
        self.doc_test_results.pop('failures')
        self.doc_test_results.pop('success')
        self.doc_test_results.pop('time_spent')
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result['failures']):
                text = f'''*Num failures* :{len(job_result["failed"] )} \n'''
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print('Sending the following reply')
                print(json.dumps({'blocks': blocks}))
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text=f'''Results for {job}''', blocks=blocks, thread_ts=self.thread_ts['ts'], )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'''&page={i + 2}''').json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.', e)
    return {}
def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding='utf-8') as f:
                    _artifact[file.split('.')[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'''Could not open {os.path.join(name, file )}.''') from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({'name': self.name, 'path': path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
__lowerCamelCase = get_job_links()
__lowerCamelCase = retrieve_available_artifacts()
__lowerCamelCase = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowerCamelCase = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCamelCase = github_actions_job_links.get("run_doctests")
__lowerCamelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
__lowerCamelCase = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = handle_test_results(artifact["stats"])
__lowerCamelCase = failed
__lowerCamelCase = success
__lowerCamelCase = time_spent[1:-1] + ", "
__lowerCamelCase = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
__lowerCamelCase = line.replace("FAILED ", "")
__lowerCamelCase = line.split()[0].replace("\n", "")
if "::" in line:
__lowerCamelCase , __lowerCamelCase = line.split("::")
else:
__lowerCamelCase , __lowerCamelCase = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowerCamelCase = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowerCamelCase = all_failures[test] if test in all_failures else "N/A"
__lowerCamelCase = failure
break
__lowerCamelCase = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE : List[str] = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE : List[str] = OSError
# Data
# ------------------------------------------------
SCREAMING_SNAKE_CASE : List[Any] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
SCREAMING_SNAKE_CASE : List[Any] = """3.0.12"""
SCREAMING_SNAKE_CASE : int = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""")
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
def SCREAMING_SNAKE_CASE (self , a_=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__snake_case : Tuple = id(self )
__snake_case : str = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
__snake_case : Dict = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__(self ):
'''simple docstring'''
self.acquire()
return self
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.release()
return None
def __del__(self ):
'''simple docstring'''
self.release(force=a_ )
return None
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = os.path.basename(a_ )
if len(a_ ) > max_length and max_length > 0:
__snake_case : List[Any] = os.path.dirname(a_ )
__snake_case : Any = str(hash(a_ ) )
__snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(a_ , a_ )
else:
return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking() function to hard lock the lock file on Windows systems."""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None
    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock() function to hard lock the lock file on Unix systems."""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
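# Minimal usage sketch of the FileLock API defined above (the lock path is a
# hypothetical example, not part of the original module):
#
#   lock = FileLock("/tmp/my_resource.lock", timeout=10)
#   with lock.acquire(poll_intervall=0.1):
#       ...  # critical section; the lock is released when the block exits
#
# acquire() returns an _Acquire_ReturnProxy so that the nested lock counter is
# decremented exactly once when the `with` block exits.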
| 24
|
"""simple docstring"""
def twos_complement(number: int) -> str:
    """Return the two's complement binary string of a negative integer.
    >>> twos_complement(-5)
    '0b1011'
    >>> twos_complement(0)
    '0b0'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
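    # Worked illustration (added): for -5, bin(-5)[3:] == "101" gives a width of
    # 3 bits; abs(-5) - (1 << 3) == -3 yields the payload "11", which is padded
    # with a leading sign bit to "1011".
    assert twos_complement(-5) == "0b1011"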
| 24
| 1
|
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length using the secrets module."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password generated by prepending the required characters and filling the
    # rest with letters, digits and punctuation in roughly equal parts.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    pass  # Put your code here...
def random_letters(chars_incl, i):
    pass  # Put your code here...
def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters.
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
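# Illustrative checks (not part of the original script):
#   is_strong_password("Hx7$abcd")  -> True   (upper, lower, digit, punctuation, length 8)
#   is_strong_password("password")  -> False  (only lowercase characters)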
| 108
|
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load
    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
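# Usage sketch (requires a CUDA toolchain and the kernel sources listed above;
# the forward call below mirrors how the modeling code uses the extension and
# is illustrative only):
#   MSDA = load_cuda_kernels()
#   output = MSDA.ms_deform_attn_forward(
#       value, spatial_shapes, level_start_index,
#       sampling_locations, attention_weights, im2col_step,
#   )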
| 301
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"
    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 2e-3
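# Quick sketch of the config in use (illustrative; the defaults above match the
# sail/poolformer_s12 layout):
#   config = PoolFormerConfig()
#   assert config.depths == [2, 2, 6, 2] and config.hidden_sizes[-1] == 512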
| 370
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False
    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True
    def init_retrieval(self):
        self.retriever.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )
    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
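# Usage sketch (hypothetical actor handles, mirroring the examples/research_projects/rag setup):
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()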
| 102
| 0
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 73
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 73
| 1
|
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
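# Examples (illustrative):
#   camelcase_to_snakecase("SomeDatasetName")  -> "some_dataset_name"
#   filenames_for_dataset_split("/data", "squad", "train", "arrow", shard_lengths=[100, 100])
#   -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]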
| 282
|
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def snake_case__ ( self : Tuple )-> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4),layers_per_block=2,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=3_2,attention_head_dim=4,)
A__ = DDIMScheduler(
beta_start=0.00_085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=lowercase_,set_alpha_to_one=lowercase_,)
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,)
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=5_1_2,)
A__ = CLIPTextModel(lowercase_ )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int],lowercase_ : List[Any]=0 )-> Any:
'''simple docstring'''
A__ = floats_tensor((1, 3, 3, 3_2, 3_2),rng=random.Random(lowercase_ ) ).to(lowercase_ )
if str(lowercase_ ).startswith('mps' ):
A__ = torch.manual_seed(lowercase_ )
else:
A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case__ ( self : List[Any] )-> List[Any]:
'''simple docstring'''
A__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = VideoToVideoSDPipeline(**lowercase_ )
A__ = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
A__ = self.get_dummy_inputs(lowercase_ )
A__ = 'np'
A__ = sd_pipe(**lowercase_ ).frames
A__ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
A__ = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case__ ( self : Optional[Any] )-> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case__ ( self : Any )-> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case__ ( self : int )-> int:
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case__ ( self : List[Any] )-> List[str]:
'''simple docstring'''
pass
def snake_case__ ( self : Optional[int] )-> Optional[Any]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
def snake_case__ ( self : List[str] )-> Dict:
'''simple docstring'''
A__ = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
A__ = torch.Generator(device='cpu' ).manual_seed(0 )
A__ = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6),generator=lowercase_ )
A__ = video.to('cuda' )
A__ = 'Spiderman is surfing'
A__ = pipe(lowercase_,video=lowercase_,generator=lowercase_,num_inference_steps=3,output_type='pt' ).frames
A__ = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 282
| 1
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
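# Example invocation (placeholder paths, shown for illustration):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --dest /tmp/rag-consolidated \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base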
| 18
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element to 1 / (1 + e^(-x)).
    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (swish) activation: x * sigmoid(x).
    >>> sigmoid_linear_unit(np.array([0.0]))
    array([0.])
    """
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
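    # Quick demonstration (illustrative): SiLU is x * sigmoid(x), so it is
    # negative for small negative inputs and approaches x for large ones.
    print(sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])))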
| 318
| 0
|
'''simple docstring'''
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 356
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
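# With the lazy module above, `from transformers.models.mask2former import Mask2FormerModel`
# only triggers the torch-dependent import on first attribute access (a sketch of the intent).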
| 236
| 0
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )
__snake_case = []
# custom device map
if isinstance(snake_case_ , snake_case_ ) and len(device_map.keys() ) > 1:
__snake_case = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
__snake_case = get_keys_to_not_convert(snake_case_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case_ )
__snake_case = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
__snake_case = []
__snake_case = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case_ )
# compatibility with peft
__snake_case = load_in_abit
__snake_case = load_in_abit
__snake_case = get_parameter_device(snake_case_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
__snake_case = replace_with_bnb_layers(snake_case_ , snake_case_ , modules_to_not_convert=snake_case_ )
# convert param to the right dtype
__snake_case = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
__snake_case = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
__snake_case = getattr(snake_case_ , snake_case_ , snake_case_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(snake_case_ ):
param.to(snake_case_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
__snake_case = replace_with_bnb_layers(
snake_case_ , snake_case_ , modules_to_not_convert=snake_case_ )
__snake_case = get_quantized_model_device_map(
snake_case_ , snake_case_ , snake_case_ , max_memory=snake_case_ , no_split_module_classes=snake_case_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
__snake_case = True
__snake_case = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
snake_case_ , snake_case_ , snake_case_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case_ , offload_state_dict=snake_case_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case_ , device_map=snake_case_ , offload_dir=snake_case_ )
def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : Any , snake_case_ : List[Any]=None , snake_case_ : int=None , snake_case_ : Dict=None ) -> Any:
if device_map is None:
if torch.cuda.is_available():
__snake_case = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(snake_case_ , snake_case_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
__snake_case = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
__snake_case = {}
__snake_case = special_dtypes
__snake_case = no_split_module_classes
__snake_case = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
__snake_case = get_balanced_memory(
snake_case_ , low_zero=(device_map == '''balanced_low_0''') , max_memory=snake_case_ , **snake_case_ , )
__snake_case = max_memory
__snake_case = infer_auto_device_map(snake_case_ , **snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
# check if don't have any quantized module on the cpu
__snake_case = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
__snake_case = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every torch.nn.Linear module with a bitsandbytes quantized layer."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def lowerCamelCase__ ( snake_case_ : int ) -> Dict:
# Create a copy of the model
with init_empty_weights():
__snake_case = deepcopy(snake_case_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
__snake_case = find_tied_parameters(snake_case_ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case_ , snake_case_ ):
__snake_case = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__snake_case = sum(snake_case_ , [] )
__snake_case = len(snake_case_ ) > 0
# Check if it is a base model
__snake_case = False
if hasattr(snake_case_ , '''base_model_prefix''' ):
__snake_case = not hasattr(snake_case_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__snake_case = list(model.named_children() )
__snake_case = [list_modules[-1][0]]
# add last module together with tied weights
__snake_case = set(snake_case_ ) - set(snake_case_ )
__snake_case = list(set(snake_case_ ) ) + list(snake_case_ )
# remove ".weight" from the keys
__snake_case = ['''.weight''', '''.bias''']
__snake_case = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__snake_case = name.replace(snake_case_ , '''''' )
filtered_module_names.append(snake_case_ )
return filtered_module_names
def has_4bit_bnb_layers(model: nn.Module) -> bool:
    # Check whether the model contains any `bnb.nn.Linear4bit` layers.
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter: nn.Module) -> torch.device:
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 24
|
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
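    # Hand-checked examples (added for illustration): the function returns one
    # longest non-decreasing subsequence of the input, e.g.
    print(longest_subsequence([3, 1, 2]))  # [1, 2]
    print(longest_subsequence([1, 2, 3]))  # [1, 2, 3]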
| 24
| 1
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
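# A minimal usage sketch (added for illustration): hide the terminal cursor for
# the duration of a block and restore it afterwards, even if the block raises.
#
#     with hide():
#         run_interactive_menu()  # hypothetical long-running UI loop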
| 370
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM needs far fewer denoising steps than DDPM
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Reverse the DDIM sampling process to recover the noise that generates the images
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
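# `slerp` performs spherical linear interpolation between two noise tensors:
# alpha=0 returns x0, alpha=1 returns x1, and intermediate values travel along
# the great circle between them. A hedged usage sketch (names hypothetical):
#
#     noise_a, noise_b = torch.randn(1, 64), torch.randn(1, 64)
#     halfway = AudioDiffusionPipeline.slerp(noise_a, noise_b, 0.5)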
| 174
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
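# A minimal usage sketch (added for illustration): the defaults above mirror
# google/fnet-base, so a bare instantiation gives the base architecture.
#
#     configuration = FNetConfig()
#     configuration.hidden_size                     # 768
#     configuration.use_tpu_fourier_optimizations   # False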
| 125
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
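# For example, get_focalnet_config("focalnet-tiny") resolves to depths=[2, 2, 6, 2],
# focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3] and embed_dim=96, with
# ImageNet-1k labels attached.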
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 102
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 307
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 307
| 1
|
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(F'{solution() = }')
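    # Known values for this problem (smallest positive number evenly divisible
    # by all of 1..n): solution(10) == 2520 and solution(20) == 232792560.
    assert solution(10) == 2520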
| 282
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5E-3)

    @unittest.skipIf(torch_device == 'mps', 'Training is not supported in mps')
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 282
| 1
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 356
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
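# Shape sketch for the collator above (added for illustration): B examples with
# 4 candidate endings each are flattened to 4*B sequences for padding, then
# reshaped back to (B, 4, seq_len), the layout AutoModelForMultipleChoice expects.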
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 273
| 0
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
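    # A minimal usage example (added for illustration): slowsort sorts in place.
    example = [6, 2, 4, 1]
    slowsort(example)
    print(example)  # [1, 2, 4, 6]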
| 151
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 236
| 0
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 363
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
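# Example invocation, assuming this script is saved as utils/check_offline_runners.py
# (path, runner names, and token variable are illustrative, not taken from the source):
#
#     python utils/check_offline_runners.py \
#         --target_runners=aws-gpu-runner-1,aws-gpu-runner-2 \
#         --token=$GITHUB_TOKEN
#
# The offline runners are dumped to offline_runners.txt so a later CI step can report
# them on Slack, and a non-empty offline list makes the script fail with a ValueError.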
| 224
| 0
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 61
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
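# Example invocations (the utils/custom_init_isort.py path is how this script usually
# ships; adjust if it lives elsewhere in your checkout):
#
#     python utils/custom_init_isort.py --check_only   # CI mode: raise if any __init__.py is unsorted
#     python utils/custom_init_isort.py                # fix mode: rewrite offending __init__.py files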
| 174
| 0
|
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or parsed Version) against a requirement using `operation`."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed torch version against `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
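# A minimal usage sketch (assumes STR_OPERATION_TO_FUNC maps the operator strings
# ">", ">=", "<", "<=", "==", "!=" to functions from the `operator` module):
#
#     if is_torch_version(">=", "1.12.0"):
#         ...  # safe to rely on features introduced in torch 1.12
#
#     compare_versions("numpy", ">=", "1.20.0")  # compares the *installed* numpy version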
| 287
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BioGPT model."""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 287
| 1
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
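# A small usage sketch (not part of the original module): `add_edge` returns `self`,
# so calls can be chained; the printed repr follows the directed branch shown above.
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=True)
    graph.add_edge(0, 1).add_edge(0, 2).add_edge(1, 2)
    print(graph)  # {0: [1, 2], 1: [2], 2: []}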
| 307
|
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR gate: outputs 1 when both inputs are equal, else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Exhaustively check the XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 307
| 1
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        # fmt: off
        expected_encoding = {
            '''input_ids''': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 357
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 74
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
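# A brief usage sketch (values are illustrative): with `attention_type="block_sparse"`,
# each query block attends to itself, a sliding window of neighbouring blocks, and
# `num_random_blocks` randomly chosen blocks of `block_size` tokens, instead of the
# full quadratic attention matrix.
#
#     config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#     config.attention_type = "original_full"  # fall back to full quadratic attention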
| 273
| 0
|
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    R"\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n              has several labels, will apply the softmax function on the output.\n            - `\"sigmoid\"`: Applies the sigmoid function on the output.\n            - `\"softmax\"`: Applies the softmax function on the output.\n            - `\"none\"`: Does not apply any function on the output.\n    ",
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", UserWarning, )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs):
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
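# A short usage sketch (checkpoint name is illustrative): the `pipeline` factory wires
# a model and tokenizer into this class; `top_k=None` returns the score of every label.
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     classifier("This movie was great!")               # [{'label': 'POSITIVE', 'score': 0.99...}]
#     classifier("This movie was great!", top_k=None)   # one dict per label, sorted by score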
| 363
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 209
| 0
|
def bin_to_octal(bin_string: str) -> str:
    """
    Convert a binary string to its octal equivalent by grouping bits in threes.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101010011")
    '52523'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 52
|
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| 224
| 0
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 165
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset, alternating examples between them."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets with the same schema into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
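# A minimal usage sketch (illustrative only; relies on the public `datasets` API):
#
#     from datasets import Dataset
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     interleave_datasets([d1, d2])["a"]     # [0, 10, 1, 11, 2, 12]
#     concatenate_datasets([d1, d2])["a"]    # [0, 1, 2, 10, 11, 12]
#     interleave_datasets([d1, d2], probabilities=[0.9, 0.1], seed=42)  # mostly rows of d1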
| 165
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 287
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 287
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 111
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args: Namespace):
    """Factory used by the argparse sub-parser to instantiate the command."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    """CLI command that converts a TensorFlow Datasets script into a HuggingFace Datasets script."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 111
| 1
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """Processor wrapping a ViT image processor and the MGP-STR character/BPE/wordpiece tokenizers."""

    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if 'feature_extractor' in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, sequences):
        """Convert (char_logits, bpe_logits, wp_logits) into strings, keeping the best-scoring head per sample."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
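# A minimal usage sketch (illustrative; "alibaba-damo/mgp-str-base" is the public MGP-STR
# checkpoint on the Hugging Face Hub, and `image` stands for a cropped text image):
#
#     from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)
#     text = processor.batch_decode(outputs.logits)["generated_text"]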
| 27
|
"""simple docstring"""
class CircularQueue:
    """Fixed-size circular queue backed by a Python list."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
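# A quick sanity check of the wrap-around behaviour (illustrative only):
#
#     q = CircularQueue(3)
#     q.enqueue(1).enqueue(2).enqueue(3)
#     assert q.dequeue() == 1          # front advances to index 1
#     q.enqueue(4)                     # rear wraps around to index 0
#     assert [q.dequeue() for _ in range(3)] == [2, 3, 4]
#     assert q.is_empty()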
| 74
| 0
|
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 353
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs, [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ], )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs, [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ], )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=None, )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
            nested_simplify(outputs), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
        outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
            nested_simplify(outputs), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
            nested_simplify(outputs), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
            nested_simplify(outputs), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
            nested_simplify(outputs), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
            nested_simplify(outputs), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
| 300
| 0
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 8
|
def upper(word: str) -> str:
    """
    Convert an ASCII string to uppercase without using str.upper().

    >>> upper("wow")
    'WOW'
    >>> upper("Hello World")
    'HELLO WORLD'
    >>> upper("wh[]32")
    'WH[]32'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 209
| 0
|
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
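# A minimal usage sketch (illustrative only; `image` stands for a PIL.Image the caller has loaded):
#
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(image, "How many cats are in the picture?")
#     print(answer)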
| 362
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 161
| 0
|
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """Rename T5X/Flax parameter names to the HF SwitchTransformers naming scheme."""
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights into one entry per expert
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """Parse a T5X gin config file into a SwitchTransformersConfig."""
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Convert a T5X SwitchTransformers checkpoint into a PyTorch checkpoint."""
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 165
|
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm parameters of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch naming."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the model's state dictionary with the converted T5X parameters."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
A_ : int = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 165
| 1
|
def apply_table(inp, table):
    """Apply a permutation table to the input bit string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate the bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """S-box lookup: the outer bits select the row, the middle bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
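# Illustrative S-box lookup (added for clarity, not part of the original
# script): for data = "1011", apply_sbox uses the outer bits "11" (row 3)
# and the middle bits "01" (column 1), so apply_sbox(s0, "1011") returns
# bin(s0[3][1])[2:] == "1"; the 2-bit zero padding inside the round
# function then restores the fixed width.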
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input('Enter 10 bit key: ')
SCREAMING_SNAKE_CASE_ = input('Enter 8 bit message: ')
SCREAMING_SNAKE_CASE_ = [6, 3, 7, 4, 8, 5, 10, 9]
SCREAMING_SNAKE_CASE_ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
SCREAMING_SNAKE_CASE_ = [2, 4, 3, 1]
SCREAMING_SNAKE_CASE_ = [2, 6, 3, 1, 4, 8, 5, 7]
SCREAMING_SNAKE_CASE_ = [4, 1, 3, 5, 7, 2, 8, 6]
SCREAMING_SNAKE_CASE_ = [4, 1, 2, 3, 2, 3, 4, 1]
SCREAMING_SNAKE_CASE_ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
SCREAMING_SNAKE_CASE_ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
SCREAMING_SNAKE_CASE_ = apply_table(key, paa_table)
SCREAMING_SNAKE_CASE_ = temp[:5]
SCREAMING_SNAKE_CASE_ = temp[5:]
SCREAMING_SNAKE_CASE_ = left_shift(left)
SCREAMING_SNAKE_CASE_ = left_shift(right)
SCREAMING_SNAKE_CASE_ = apply_table(left + right, pa_table)
SCREAMING_SNAKE_CASE_ = left_shift(left)
SCREAMING_SNAKE_CASE_ = left_shift(right)
SCREAMING_SNAKE_CASE_ = left_shift(left)
SCREAMING_SNAKE_CASE_ = left_shift(right)
SCREAMING_SNAKE_CASE_ = apply_table(left + right, pa_table)
# encryption
SCREAMING_SNAKE_CASE_ = apply_table(message, IP)
SCREAMING_SNAKE_CASE_ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE_ = temp[4:] + temp[:4]
SCREAMING_SNAKE_CASE_ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE_ = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
SCREAMING_SNAKE_CASE_ = apply_table(CT, IP)
SCREAMING_SNAKE_CASE_ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE_ = temp[4:] + temp[:4]
SCREAMING_SNAKE_CASE_ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE_ = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
| 189
|
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter in direct form I."""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Load the denominator (a) and numerator (b) coefficient vectors."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Filter a single sample and return the filtered output."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
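# Minimal usage sketch (illustrative coefficient values, not part of the
# original module): configure a 2nd-order filter, then stream samples
# through it one at a time.
#
#     filt = IIRFilter(2)
#     filt.set_coefficients([1.0, -1.1430, 0.4128], [0.0675, 0.1349, 0.0675])
#     outputs = [filt.process(x) for x in (0.0, 1.0, 1.0, 1.0)]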
| 189
| 1
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = RoCBertTokenizer
lowerCAmelCase__ = None
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = filter_non_english
def UpperCAmelCase__ ( self : Any ):
super().setUp()
__snake_case: Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
__snake_case: Any = {}
__snake_case: List[str] = {}
for i, value in enumerate(A ):
__snake_case: List[str] = i
__snake_case: Optional[Any] = i
__snake_case: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
__snake_case: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
json.dump(A , A , ensure_ascii=A )
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(A , A , ensure_ascii=A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Any = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__snake_case: Union[str, Any] = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(A , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Dict = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[int] = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Dict = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: List[str] = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Any ):
__snake_case: List[str] = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Dict = RoCBertBasicTokenizer(do_lower_case=A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__snake_case: Optional[Any] = {}
for i, token in enumerate(A ):
__snake_case: List[str] = i
__snake_case: Optional[Any] = RoCBertWordpieceTokenizer(vocab=A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCAmelCase__ ( self : Any ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCAmelCase__ ( self : List[Any] ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
__snake_case: Union[str, Any] = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
def UpperCAmelCase__ ( self : List[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Dict = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Any = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__snake_case: Tuple = tokenizer_r.encode_plus(
A , return_attention_mask=A , return_token_type_ids=A , return_offsets_mapping=A , add_special_tokens=A , )
__snake_case: Dict = tokenizer_r.do_lower_case if hasattr(A , """do_lower_case""" ) else False
__snake_case: Any = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Any = ["""的""", """人""", """有"""]
__snake_case: int = """""".join(A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[Any] = True
__snake_case: Optional[int] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: Dict = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: List[str] = tokenizer_p.encode(A , add_special_tokens=A )
__snake_case: str = tokenizer_r.encode(A , add_special_tokens=A )
__snake_case: Dict = tokenizer_r.convert_ids_to_tokens(A )
__snake_case: Tuple = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A , A )
self.assertListEqual(A , A )
__snake_case: Union[str, Any] = False
__snake_case: Any = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Dict = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: str = tokenizer_r.encode(A , add_special_tokens=A )
__snake_case: Optional[int] = tokenizer_p.encode(A , add_special_tokens=A )
__snake_case: Optional[int] = tokenizer_r.convert_ids_to_tokens(A )
__snake_case: int = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that only the first Chinese character is not preceded by "##".
__snake_case: Tuple = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(A )
]
self.assertListEqual(A , A )
self.assertListEqual(A , A )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__snake_case: int = tokenizer.encode("""你好""" , add_special_tokens=A )
__snake_case: Optional[int] = tokenizer.encode("""你是谁""" , add_special_tokens=A )
__snake_case: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
__snake_case: Tuple = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Union[str, Any] = self.get_tokenizers(do_lower_case=A )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case: List[Any] = """你好,你是谁"""
__snake_case: int = tokenizer.tokenize(A )
__snake_case: Dict = tokenizer.convert_tokens_to_ids(A )
__snake_case: Tuple = tokenizer.convert_tokens_to_shape_ids(A )
__snake_case: Tuple = tokenizer.convert_tokens_to_pronunciation_ids(A )
__snake_case: List[Any] = tokenizer.prepare_for_model(
A , A , A , add_special_tokens=A )
__snake_case: Tuple = tokenizer.encode_plus(A , add_special_tokens=A )
self.assertEqual(A , A )
| 111
|
import math
def is_prime(number: int) -> bool:
    """Trial division using the fact that all primes > 3 are of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]
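# Illustrative checks (added for clarity, not part of the original solution):
# is_prime(29) is True (29 = 6*5 - 1 survives the 6k +/- 1 trial division),
# is_prime(25) is False (divisible by i = 5 on the first loop iteration),
# and solution(6) returns 13, the sixth prime.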
if __name__ == "__main__":
print(f'{solution() = }')
| 111
| 1
|
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__snake_case = logging.getLogger(__name__)
class lowercase ( A__ ):
"""simple docstring"""
_a = 'token-classification'
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
if type(UpperCamelCase_ ) == dict:
UpperCamelCase__ :List[str] = Namespace(**UpperCamelCase_ )
UpperCamelCase__ :Any = import_module('''tasks''' )
try:
UpperCamelCase__ :int = getattr(UpperCamelCase_ , hparams.task_type )
UpperCamelCase__ :TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCamelCase__ :Dict = self.token_classification_task.get_labels(hparams.labels )
UpperCamelCase__ :Dict = CrossEntropyLoss().ignore_index
super().__init__(UpperCamelCase_ , len(self.labels ) , self.mode )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return self.model(**UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ :str = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            )  # XLM and RoBERTa don't use token_type_ids
UpperCamelCase__ :Union[str, Any] = self(**UpperCamelCase_ )
UpperCamelCase__ :str = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.hparams
for mode in ["train", "dev", "test"]:
UpperCamelCase__ :Dict = self._feature_file(UpperCamelCase_ )
if os.path.exists(UpperCamelCase_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , UpperCamelCase_ )
UpperCamelCase__ :Dict = torch.load(UpperCamelCase_ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCamelCase__ :int = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.token_classification_task.convert_examples_to_features(
UpperCamelCase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCamelCase_ , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , UpperCamelCase_ )
torch.save(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self._feature_file(UpperCamelCase_ )
logger.info('''Loading features from cached file %s''' , UpperCamelCase_ )
UpperCamelCase__ :Dict = torch.load(UpperCamelCase_ )
UpperCamelCase__ :int = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase__ :str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCamelCase__ :Optional[int] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCamelCase__ :Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCamelCase__ :Any = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , batch_size=UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
"""Compute validation""" ""
UpperCamelCase__ :int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ :str = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            )  # XLM and RoBERTa don't use token_type_ids
UpperCamelCase__ :Union[str, Any] = self(**UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Dict = outputs[:2]
UpperCamelCase__ :List[Any] = logits.detach().cpu().numpy()
UpperCamelCase__ :List[str] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCamelCase__ :Optional[int] = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCamelCase__ :str = np.argmax(UpperCamelCase_ , axis=2 )
UpperCamelCase__ :Union[str, Any] = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCamelCase__ :Dict = dict(enumerate(self.labels ) )
UpperCamelCase__ :Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase__ :int = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCamelCase__ :str = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(UpperCamelCase_ , UpperCamelCase_ ),
'''precision''': precision_score(UpperCamelCase_ , UpperCamelCase_ ),
'''recall''': recall_score(UpperCamelCase_ , UpperCamelCase_ ),
'''f1''': fa_score(UpperCamelCase_ , UpperCamelCase_ ),
}
UpperCamelCase__ :Optional[Any] = dict(results.items() )
UpperCamelCase__ :Optional[int] = results
return ret, preds_list, out_label_list
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = self._eval_end(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self._eval_end(UpperCamelCase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCamelCase__ :Union[str, Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(UpperCamelCase_ , UpperCamelCase_ )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=UpperCamelCase_ , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=UpperCamelCase_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=UpperCamelCase_ , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=UpperCamelCase_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__snake_case = NERTransformer.add_model_specific_args(parser, os.getcwd())
__snake_case = parser.parse_args()
__snake_case = NERTransformer(args)
__snake_case = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__snake_case = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
__snake_case = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 219
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence of the given sequence."""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Recursively explore the exclude/include branches for each element."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
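# Illustrative trace (not part of the original module): for the sequence
# [3, 1] the state-space tree is explored exclusion-first, so the printed
# order is [], [1], [3], [3, 1].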
if __name__ == "__main__":
__snake_case = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 219
| 1
|
import os
import time
import numpy as np
import onnxruntime as ort
_A = '1'
_A = '0'
_A = '1'
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
start_time = time.time()
max_iters = 2000
outputs = {}
for iter in range(max_iters):
    outputs = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
| 62
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the longest side is strictly shorter than the sum of the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
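# Illustrative examples (not part of the original module):
# check_polygon([3, 4, 5]) returns True since 5 < 3 + 4, while
# check_polygon([1, 1, 3]) returns False because the longest side is not
# shorter than the sum of the other two.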
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
| 0
|
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal roman numeral form of an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the total character savings from rewriting every numeral minimally."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
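# Worked example (illustrative, not part of the original solution): the file
# form "XIIII" parses as 10 + 1 + 1 + 1 + 1 = 14, generate_roman_numerals(14)
# produces the minimal form "XIV", and that line contributes 5 - 3 = 2
# characters to the total savings.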
if __name__ == "__main__":
print(f"""{solution() = }""")
| 365
|
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Return a nontrivial divisor of ``num``, or None if no divisor is found."""
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
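# Illustrative sanity check (example values, not part of the original script):
# 8051 = 83 * 97, so pollard_rho(8051) usually returns 83 or 97 within the
# default three attempts, while pollard_rho(17) returns None because 17 is
# prime and has no nontrivial divisor.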
if __name__ == "__main__":
import argparse
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
__UpperCAmelCase = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 42
| 0
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=30 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=10 , UpperCamelCase_=0.02 , UpperCamelCase_=3 , UpperCamelCase_=0.6 , UpperCamelCase_=None , ):
'''simple docstring'''
UpperCamelCase__ :int = parent
UpperCamelCase__ :List[str] = batch_size
UpperCamelCase__ :Optional[int] = image_size
UpperCamelCase__ :List[str] = patch_size
UpperCamelCase__ :int = num_channels
UpperCamelCase__ :Tuple = is_training
UpperCamelCase__ :Optional[int] = use_labels
UpperCamelCase__ :Optional[Any] = hidden_size
UpperCamelCase__ :List[str] = num_hidden_layers
UpperCamelCase__ :Optional[int] = num_attention_heads
UpperCamelCase__ :Optional[Any] = intermediate_size
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :str = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Optional[Any] = type_sequence_label_size
UpperCamelCase__ :Tuple = initializer_range
UpperCamelCase__ :int = mask_ratio
UpperCamelCase__ :Optional[int] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ :Union[str, Any] = (image_size // patch_size) ** 2
UpperCamelCase__ :List[str] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Optional[Any] = None
if self.use_labels:
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ViTMAEModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Tuple = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ViTMAEForPreTraining(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Optional[Any] = model(UpperCamelCase_ )
UpperCamelCase__ :str = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ :List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase__ :str = 1
UpperCamelCase__ :Dict = ViTMAEForPreTraining(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ :str = model(UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = config_and_inputs
UpperCamelCase__ :str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_a = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = ViTMAEModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Tuple = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(UpperCamelCase_ )
UpperCamelCase__ :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :Tuple = [*signature.parameters.keys()]
UpperCamelCase__ :Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ :Optional[int] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase__ :List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase__ :int = torch.from_numpy(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ :List[Any] = pt_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :List[str] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ :List[str] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase__ :Optional[Any] = outputs[0].cpu().numpy()
UpperCamelCase__ :Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ :List[str] = model_class.from_pretrained(UpperCamelCase_ )
model.to(UpperCamelCase_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ :Any = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
# Make sure we don't have nans
UpperCamelCase__ :int = after_outputs[0].cpu().numpy()
UpperCamelCase__ :Optional[int] = 0
UpperCamelCase__ :List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase_ , 1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Any = ViTMAEModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def a ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ :Tuple = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.default_image_processor
UpperCamelCase__ :Dict = prepare_img()
UpperCamelCase__ :Tuple = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ :Optional[int] = ViTMAEConfig()
UpperCamelCase__ :List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase__ :Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase__ :Dict = model(**UpperCamelCase_ , noise=torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ ) )
# verify the logits
UpperCamelCase__ :str = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
UpperCamelCase__ :Dict = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase_ ) , atol=1e-4 ) )
| 97
|
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Any = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
a__ : Dict = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def snake_case ( UpperCAmelCase , UpperCAmelCase )-> List[str]:
"""simple docstring"""
__A = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
__A = int(re.match(R'.*layer_(\d*).*' , UpperCAmelCase )[1] )
layer_number -= 3
return f'h.{layer_number}.' + key
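# Illustrative mapping (assumption: the regex parses the layer index out of a
# Megatron-DeepSpeed shard name such as "layer_05-model_00-model_states.pt"):
# layer 5 lands in "h.2." because the first three Megatron "layers" hold the
# word embeddings and their layernorm rather than transformer blocks.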
def snake_case ( UpperCAmelCase )-> Any:
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
__A = re.search(R'[^\d](\d+)$' , str(UpperCAmelCase ) )
if bit_search is None:
raise ValueError(f'`dtype` is not a valid dtype: {dtype}.' )
__A = int(bit_search.groups()[0] )
return bit_size // 8
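# Illustrative values (added for clarity): torch.float16 matches "16" and
# yields 16 // 8 = 2 bytes per element, torch.int8 yields 1 byte, and
# torch.bool is special-cased to 1/8 byte since it is accounted as one bit.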
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> str:
"""simple docstring"""
# Construct model
if bloom_config_file == "":
__A = BloomConfig()
else:
__A = BloomConfig.from_json_file(UpperCAmelCase )
if shard_model:
__A = os.listdir(UpperCAmelCase )
__A = sorted(filter(lambda UpperCAmelCase : s.startswith('layer' ) and "model_00" in s , UpperCAmelCase ) )
__A = {'weight_map': {}, 'metadata': {}}
__A = 0
__A = None
__A = BloomConfig()
for j, file in enumerate(UpperCAmelCase ):
print('Processing file: {}'.format(UpperCAmelCase ) )
__A = None
for i in range(UpperCAmelCase ):
# load all TP files
__A = file.replace('model_00' , f'model_0{i}' )
__A = torch.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) , map_location='cpu' )
# Rename keys in the transformers names
__A = list(temp.keys() )
for key in keys:
__A = temp.pop(UpperCAmelCase )
if tensors is None:
__A = temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__A = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__A = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__A = tensors[key] / pretraining_tp
torch.save(
UpperCAmelCase , os.path.join(
UpperCAmelCase , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
__A = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__A = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase ) ).zfill(5 ) )
__A = BloomConfig()
__A = pytorch_dump_folder_path + '/' + CONFIG_NAME
__A = total_size
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCAmelCase , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
__A = json.dumps(UpperCAmelCase , indent=2 , sort_keys=UpperCAmelCase ) + '\n'
f.write(UpperCAmelCase )
else:
__A = BloomModel(UpperCAmelCase )
__A = os.listdir(UpperCAmelCase )
__A = sorted(filter(lambda UpperCAmelCase : s.startswith('layer' ) and "model_00" in s , UpperCAmelCase ) )
__A = None
for i, file in enumerate(UpperCAmelCase ):
__A = None
for i in range(UpperCAmelCase ):
# load all TP files
__A = file.replace('model_00' , f'model_0{i}' )
__A = torch.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) , map_location='cpu' )
# Rename keys in the transformers names
__A = list(temp.keys() )
for key in keys:
__A = temp.pop(UpperCAmelCase )
if tensors is None:
__A = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__A = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__A = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__A = tensors[key] / pretraining_tp
__A = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
__A = set(other_keys.missing_keys )
else:
__A = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
__A = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__A = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
__A = model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCAmelCase )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
a__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
a__ : Tuple = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 161
| 0
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = IFImgaImgSuperResolutionPipeline
_SCREAMING_SNAKE_CASE : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
_SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
_SCREAMING_SNAKE_CASE : Optional[int] = PipelineTesterMixin.required_optional_params - {'latents'}
def _lowerCamelCase ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ):
"""simple docstring"""
if str(_UpperCamelCase ).startswith("mps" ):
_lowercase : Tuple = torch.manual_seed(_UpperCamelCase )
else:
_lowercase : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_lowercase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
_lowercase : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
_lowercase : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _lowerCamelCase ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 199
|
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
_snake_case = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
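# `precision_at_10` treats row i of `en_sentvecs` and row i of `in_sentvecs` as a
# translation pair: after mean-centering, it ranks every candidate by cosine
# distance and counts a hit when the true pair lands in the 10 nearest neighbours.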
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
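    # Note: scaling by `scheduler.init_noise_sigma` puts freshly sampled (or
    # user-supplied) latents on the noise scale the scheduler expects at step 0.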
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
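    # The unconditional embeddings are concatenated *before* the conditional ones,
    # which is what lets `__call__` recover them with `noise_pred.chunk(2)` below.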
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE ):
print()
UpperCamelCase__ : Any = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = torch.stack(SCREAMING_SNAKE_CASE )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
UpperCamelCase__ : Tuple = images.cpu().numpy()
if output_type == "pil":
UpperCamelCase__ : Optional[Any] = [self.numpy_to_pil(SCREAMING_SNAKE_CASE ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE )
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
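# The returned metadata maps every class id to its name and additionally stores
# the "thing_ids" (instance-like classes) and the full "class_names" list, which
# the image processor tests below consume.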
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(high)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # first row
        fake_binary_mask[1, :15] = 1  # second row
        fake_binary_mask[5, :10] = 1  # fifth row

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
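    # `binary_mask_to_rle` returns alternating (start, length) pairs over the
    # flattened mask, so a run of ones that begins at pixel 21 and lasts 45
    # pixels yields rle[0] == 21 and rle[1] == 45.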
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """
    Return the Hubble parameter H(z) at the given redshift for the supplied
    present-day density parameters.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
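# Sanity check: at redshift 0 in a flat universe the density parameters sum to 1,
# so E^2 = 1 and H(0) equals the Hubble constant itself (68.3 in the demo below).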
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
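# With `padding=kernel_size // 2`, odd kernel sizes give "same" padding: the
# spatial resolution is preserved whenever stride == 1.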
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
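# The bottleneck squeezes channels by `reduction` (default 4) with a 1x1
# convolution before the 3x3 convolution and expands them back with a final
# 1x1 convolution, trading a little capacity for much less compute.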
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
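# When `output_hidden_states=True`, `hidden_states` holds the encoder input
# (the embedding output) followed by the output of every stage, so the backbone
# below can index feature maps by stage.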
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
__A = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
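# A minimal usage sketch (the checkpoint matches _CHECKPOINT_FOR_DOC above;
# `image` is any PIL image you supply):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetModel.from_pretrained("microsoft/resnet-50")
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   outputs.last_hidden_state.shape  # torch.Size([1, 2048, 7, 7]), as in _EXPECTED_OUTPUT_SHAPE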
@add_start_docstrings(
    "\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    "\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ",
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
__lowerCamelCase : Optional[Any] = get_logger(__name__)
__lowerCamelCase : Union[str, Any] = Path(__file__).parent / '''model_card_template.md'''
__lowerCamelCase : Any = uuida().hex
__lowerCamelCase : str = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
__lowerCamelCase : int = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
__lowerCamelCase : List[str] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[Dict, str, None] = None ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__UpperCamelCase , __UpperCamelCase ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
ua += "; " + user_agent
return ua
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Optional[str] = None ) -> Optional[Any]:
"""simple docstring"""
if token is None:
SCREAMING_SNAKE_CASE__ = HfFolder.get_token()
if organization is None:
SCREAMING_SNAKE_CASE__ = whoami(__UpperCamelCase )["""name"""]
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
"""Modelcard rendering is based on Jinja templates."""
""" Please make sure to have `jinja` installed before using `create_model_card`."""
""" To install it, please run `pip install Jinja2`.""" )
if hasattr(__UpperCamelCase , """local_rank""" ) and args.local_rank not in [-1, 0]:
return
SCREAMING_SNAKE_CASE__ = args.hub_token if hasattr(__UpperCamelCase , """hub_token""" ) else None
SCREAMING_SNAKE_CASE__ = get_full_repo_name(__UpperCamelCase , token=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__UpperCamelCase , model_name=__UpperCamelCase , repo_name=__UpperCamelCase , dataset_name=args.dataset_name if hasattr(__UpperCamelCase , """dataset_name""" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__UpperCamelCase , """gradient_accumulation_steps""" ) else None
) , adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase , """adam_beta1""" ) else None , adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(__UpperCamelCase , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(__UpperCamelCase , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , )
SCREAMING_SNAKE_CASE__ = os.path.join(args.output_dir , """README.md""" )
model_card.save(__UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[str] , __UpperCamelCase : Optional[str] = None ) -> Union[str, Any]:
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
SCREAMING_SNAKE_CASE__ = str(Path(__UpperCamelCase ).as_posix() )
SCREAMING_SNAKE_CASE__ = re.search(r"""snapshots/([^/]+)/""" , __UpperCamelCase )
if search is None:
return None
SCREAMING_SNAKE_CASE__ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
__lowerCamelCase : int = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
__lowerCamelCase : str = os.path.join(hf_cache_home, '''diffusers''')
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Optional[str] = None ) -> None:
"""simple docstring"""
if new_cache_dir is None:
SCREAMING_SNAKE_CASE__ = DIFFUSERS_CACHE
if old_cache_dir is None:
SCREAMING_SNAKE_CASE__ = old_diffusers_cache
SCREAMING_SNAKE_CASE__ = Path(__UpperCamelCase ).expanduser()
SCREAMING_SNAKE_CASE__ = Path(__UpperCamelCase ).expanduser()
for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
SCREAMING_SNAKE_CASE__ = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase )
new_blob_path.parent.mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase )
os.replace(__UpperCamelCase , __UpperCamelCase )
try:
os.symlink(__UpperCamelCase , __UpperCamelCase )
except OSError:
logger.warning(
"""Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
__lowerCamelCase : List[str] = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
__lowerCamelCase : List[str] = 0
else:
with open(cache_version_file) as f:
try:
__lowerCamelCase : Dict = int(f.read())
except ValueError:
__lowerCamelCase : Tuple = 0
if cache_version < 1:
__lowerCamelCase : Union[str, Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
__lowerCamelCase : Tuple = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> str:
"""simple docstring"""
if variant is not None:
SCREAMING_SNAKE_CASE__ = weights_name.split(""".""" )
SCREAMING_SNAKE_CASE__ = splits[:-1] + [variant] + splits[-1:]
SCREAMING_SNAKE_CASE__ = """.""".join(__UpperCamelCase )
return weights_name
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """Resolve a weights file locally, or download it from the Hub."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        # (`__version__` is diffusers' package version, imported at the top of the module.)
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
"""this model name. Check the model page at """
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 219
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
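
# Example invocation (sketch; assumes this launcher is saved as xla_spawn.py):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...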
| 219
| 1
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the ServeCommand from the provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose the information of the model as a REST resource."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers CLI."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"]
                    ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"]
                    ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"]
                    ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"]
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)
    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input and eventually return the corresponding token ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids into readable text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
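

# Example request against a running server (sketch; the port comes from --port, default 8888;
# `embed=True` above means each field is a key of the JSON body):
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'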
| 363
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    """Weighted undirected graph used to compute a minimum spanning tree with Boruvka's algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge to the graph"""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component that a given node belongs to"""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates a new component root throughout the component of a given node"""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges two components, attaching the smaller one to the larger one"""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Boruvka's algorithm to find the minimum spanning tree"""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # For every component, remember its cheapest outgoing edge.
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add each component's cheapest edge to the MST and merge the components.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    # (function name assumed; the obfuscated source only kept an empty placeholder)
    """
    Example usage (not a doctest, since `boruvka` prints while it merges components):
        g = Graph(4)
        g.add_edge(0, 1, 10)
        g.add_edge(0, 2, 6)
        g.add_edge(0, 3, 5)
        g.add_edge(1, 3, 15)
        g.add_edge(2, 3, 4)
        g.boruvka()
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 144
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al. (2022), tailored to variance-expanding (VE) models."""

    order = 2

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # This scheduler does not rescale the model input.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None):
        # Explicit Langevin-like "churn": add noise to reach a higher noise level sigma_hat = sigma + gamma * sigma.
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True) -> Union[KarrasVeOutput, Tuple]:
        # Euler step from sigma_hat down to sigma_prev.
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True) -> Union[KarrasVeOutput, Tuple]:
        # Second-order (Heun-like) correction based on the model output at the Euler step.
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
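

# Minimal sampling-loop sketch (illustrative only; `unet` stands for a trained VE model, and the
# real KarrasVe pipeline additionally rescales the model input/output, which is omitted here):
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=50)
#   sample = torch.randn(shape) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = unet(sample_hat, sigma_hat).sample
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample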
| 87
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
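
# Run (sketch; the flags are defined by the parser above, the script filename is illustrative):
#   python inference_bf16.py --dpm --steps 20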
| 42
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 350
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
| 32
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
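

# Example (sketch, grounded in the validation above): extend the usable context 2x via linear RoPE scaling.
# config = LlamaConfig(max_position_embeddings=4096, rope_scaling={"type": "linear", "factor": 2.0})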
| 199
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # (method name assumed; the obfuscated source only preserved an empty override here)
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when none is passed in, so `test_switch` can exercise a converted one.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
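

# Run these tests (sketch; the path assumes the diffusers repository layout):
#   pytest tests/schedulers/test_scheduler_dpm_single.py -q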
| 199
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the IF pipelines: generated images plus optional nsfw/watermark flags."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 359
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    """Construct an MBART tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
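

# Example (sketch): prepare translation inputs with language-specific suffix tokens.
# tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")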
| 145
| 0
|
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume between the units listed in METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
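
# Example: volume_conversion(4, "cubicmeter", "litre") returns 4000.0
# (4 m^3 * 1 * 1000 L/m^3).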
| 267
|
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """
    Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single
    processor class. (Class name reconstructed from the declared feature extractor/tokenizer classes.)
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        """
        Forwards the `audio` argument to the feature extractor's `__call__` and the `text` argument to the
        tokenizer's `__call__`, merging the results into a single BatchEncoding.
        """
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs

        elif text is None:
            return audio_inputs

        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """
        Decodes batches of audio output from the model, or forwards everything else to the tokenizer's
        `batch_decode`.
        """
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        """Strips any padding from the audio values back to a list of numpy audio arrays."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
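

# Example (sketch; the checkpoint name is illustrative): strip padding from generated audio.
# processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
# audio_list = processor.batch_decode(audio=generated_audio, padding_mask=inputs["padding_mask"])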
| 272
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep(self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler(self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral(self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
    def gpu_provider(self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
    def test_inference_default_pndm(self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((128, 128) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms(self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((128, 128) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=lms_scheduler , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer ):
    def __init__(self , vocab: Dict[str, int] , merges: List[str] , max_length: int = None , pad_token_id: int = None ):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )

    @classmethod
    def from_tokenizer(cls , tokenizer: GPT2Tokenizer , *args , **kwargs ):
        merges = [" ".join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , *init_inputs , **kwargs ):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )

    @classmethod
    def from_config(cls , config ):
        return cls(**config )

    def get_config(self ):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self , x , max_length: int = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
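# Hedged usage sketch for the in-graph tokenizer layer above. It assumes
# keras-nlp and tensorflow-text are installed and that the "gpt2" checkpoint
# is reachable over the network; without a pad_token_id the output is ragged.
if __name__ == "__main__":
    tok = TFGPT2Tokenizer.from_pretrained("gpt2")
    out = tok(tf.constant(["hello world", "in-graph tokenization"]))
    print(out["input_ids"])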
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float] , a: float , b: float ) -> float:
    start: float = a
    end: float = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval." )
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float ) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f , 1 , 1000 ))

    import doctest

    doctest.testmod()
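# A second hedged check of the bisection routine above: the positive root of
# x**2 - 2 on [0, 2] should land within 1e-6 of sqrt(2).
if __name__ == "__main__":
    approx = bisection(lambda x: x**2 - 2, 0, 2)
    assert abs(approx - 2**0.5) < 1e-6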
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput ):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module ):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self ):
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__(self , conditioning ):
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self , rng: jax.random.KeyArray ) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )

        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup(self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )

        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )

        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )

            down_blocks.append(down_block )

            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale: float = 1.0 , return_dict: bool = True , train: bool = False , ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )

        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )

        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )

        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )

        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample , controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample )

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
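# Hedged, shape-only initialization sketch for FlaxControlNetModel above; no
# pretrained weights are loaded, and the tiny channel sizes are illustrative
# assumptions, not values any real checkpoint uses.
if __name__ == "__main__":
    controlnet = FlaxControlNetModel(
        sample_size=16, block_out_channels=(8, 8, 8, 8), attention_head_dim=1, cross_attention_dim=8
    )
    params = controlnet.init_weights(rng=jax.random.PRNGKey(0))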
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ConvNextFeatureExtractor"]
lowercase_ = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
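# Hedged sanity check of the lazy module above: the heavy torch/TF submodules
# are only imported when an attribute is first accessed, not at import time.
# Assumes an installed transformers tree; `model_type` is expected to be "convnext".
if __name__ == "__main__":
    from transformers import ConvNextConfig

    config = ConvNextConfig()
    print(config.model_type)  # "convnext"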
import os
from collections.abc import Iterator
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = "." ):
for dir_path, dir_names, filenames in os.walk(SCREAMING_SNAKE_CASE_ ):
lowercase__ = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(SCREAMING_SNAKE_CASE_ )[1] in (".py", ".ipynb"):
yield os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).lstrip("./" )
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return f'''{i * " "}*''' if i else "\n##"
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(SCREAMING_SNAKE_CASE_ ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(SCREAMING_SNAKE_CASE_ )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = "." ):
lowercase__ = ""
for filepath in sorted(good_file_paths(SCREAMING_SNAKE_CASE_ ) ):
lowercase__ , lowercase__ = os.path.split(SCREAMING_SNAKE_CASE_ )
if filepath != old_path:
lowercase__ = print_path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase__ = f'''{filepath}/{filename}'''.replace(" " , "%20" )
lowercase__ = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(f'''{md_prefix(SCREAMING_SNAKE_CASE_ )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(""".""")
import math
def is_prime(number: int ) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1 ) -> int:
    """
    Returns the side length of the first square spiral for which the ratio of
    primes along both diagonals falls below `ratio`.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
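# Hedged example of the spiral arithmetic above: for an odd side length j, the
# four corners of the j x j ring are j*j - k*(j - 1) for k in 0..3.
if __name__ == "__main__":
    j = 5
    corners = [j * j - k * (j - 1) for k in range(4)]
    print(corners)  # [25, 21, 17, 13] for the 5 x 5 ring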
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ (OnnxPipelineTesterMixin , unittest.TestCase ):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = self.get_dummy_inputs()
a_ : int = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : Tuple = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : List[Any] = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : List[str] = self.get_dummy_inputs()
a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : str = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
a_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = self.get_dummy_inputs()
a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : Optional[Any] = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
a_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = self.get_dummy_inputs()
a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : int = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = self.get_dummy_inputs()
a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : Union[str, Any] = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
    def gpu_provider(self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
a_ : int = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = 'A fantasy landscape, trending on artstation'
a_ : str = torch.manual_seed(0 )
a_ : List[str] = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
a_ : Dict = output.images
a_ : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
a_ : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
a_ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
a_ : List[str] = init_image.resize((1_2_8, 1_2_8) )
a_ : Dict = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
a_ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Any = 'A fantasy landscape, trending on artstation'
a_ : Tuple = torch.manual_seed(0 )
a_ : Optional[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
a_ : str = output.images
a_ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
a_ : Tuple = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline(Pipeline ):
    def __init__(self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )

    def _sanitize_parameters(self , padding=None , truncation=None , top_k=None , **kwargs ):
        preprocess_params , postprocess_params = {} , {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params , {} , postprocess_params

    def __call__(self , image: Union["Image.Image", str] , question: str = None , **kwargs ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results

    def preprocess(self , inputs , padding=False , truncation=False ):
        image = load_image(inputs["image"] )
        model_inputs = self.tokenizer(
            inputs["question"] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs

    def _forward(self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess(self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image , w , h ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]

    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp(t , v0 , v1 , DOT_THRESHOLD=0.9995 ):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot )
        sin_theta_0 = np.sin(theta_0 )
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t )
        s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2 ).to(input_device )

    return v2
def spherical_dist_loss(x , y ):
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )


def set_requires_grad(model , value ):
    for param in model.parameters():
        param.requires_grad = value
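# Hedged numeric check of slerp above: halfway between two orthogonal unit
# vectors should give the 45-degree direction, roughly (0.7071, 0.7071).
if __name__ == "__main__":
    v = slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
    print(v)  # approximately [0.7071, 0.7071]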
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline ):
    def __init__( self , vae: AutoencoderKL , text_encoder: CLIPTextModel , clip_model: CLIPModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , feature_extractor: CLIPFeatureExtractor , coca_model=None , coca_tokenizer=None , coca_transform=None , ):
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , clip_model=clip_model , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , coca_model=coca_model , coca_tokenizer=coca_tokenizer , coca_transform=coca_transform , )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size , int )
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , False )
        set_requires_grad(self.clip_model , False )
def _snake_case ( self: Optional[Any] , a: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCamelCase : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def _snake_case ( self: Dict ):
self.enable_attention_slicing(a )
def _snake_case ( self: Optional[Any] ):
set_requires_grad(self.vae , a )
def _snake_case ( self: List[Any] ):
set_requires_grad(self.vae , a )
def _snake_case ( self: int ):
set_requires_grad(self.unet , a )
def _snake_case ( self: int ):
set_requires_grad(self.unet , a )
def _snake_case ( self: Optional[Any] , a: Union[str, Any] , a: List[str] , a: List[Any] ):
# get the original timestep using init_timestep
__lowerCamelCase : List[Any] = min(int(num_inference_steps * strength ) , a )
__lowerCamelCase : str = max(num_inference_steps - init_timestep , 0 )
__lowerCamelCase : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
    def prepare_latents(self , image , timestep , batch_size , dtype , device , generator=None ):
        if not isinstance(image , torch.Tensor ):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image )}" )

        image = image.to(device=device , dtype=dtype )

        if isinstance(generator , list ):
            init_latents = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
            ]
            init_latents = torch.cat(init_latents , dim=0 )
        else:
            init_latents = self.vae.encode(image ).latent_dist.sample(generator )

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size , dim=0 )

        noise = randn_tensor(init_latents.shape , generator=generator , device=device , dtype=dtype )

        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents

        return latents

    def get_image_description(self , image ):
        transformed_image = self.coca_transform(image ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )

    def get_clip_image_embeddings(self , image , batch_size ):
        clip_image_input = self.feature_extractor.preprocess(image )
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size , dim=0 )
        return image_embeddings_clip
@torch.enable_grad()
    def cond_fn( self , latents , timestep , index , text_embeddings , noise_pred_original , original_image_embeddings_clip , clip_guidance_scale , ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents , timestep )

        # predict the noise residual
        noise_pred = self.unet(latent_model_input , timestep , encoder_hidden_states=text_embeddings ).sample

        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t )
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , LMSDiscreteScheduler ):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler )} not supported" )

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )

        image = transforms.Resize(self.feature_extractor_size )(image )
        image = self.normalize(image ).to(latents.dtype )

        image_embeddings_clip = self.clip_model.get_image_features(image )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )

        loss = spherical_dist_loss(image_embeddings_clip , original_image_embeddings_clip ).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss , latents )[0]

        if isinstance(self.scheduler , LMSDiscreteScheduler ):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t ) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__( self , style_image: Union[torch.FloatTensor, PIL.Image.Image] , content_image: Union[torch.FloatTensor, PIL.Image.Image] , style_prompt: Optional[str] = None , content_prompt: Optional[str] = None , height: Optional[int] = 512 , width: Optional[int] = 512 , noise_strength: float = 0.6 , num_inference_steps: Optional[int] = 50 , guidance_scale: Optional[float] = 7.5 , batch_size: Optional[int] = 1 , eta: float = 0.0 , clip_guidance_scale: Optional[float] = 100 , generator: Optional[torch.Generator] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , slerp_latent_style_strength: float = 0.8 , slerp_prompt_style_strength: float = 0.1 , slerp_clip_image_style_strength: float = 0.1 , ):
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(a )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(a , torch.Generator ) and batch_size > 1:
__lowerCamelCase : List[Any] = [generator] + [None] * (batch_size - 1)
__lowerCamelCase : Dict = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
__lowerCamelCase : Any = [x[0] for x in coca_is_none if x[1]]
__lowerCamelCase : str = ', '.join(a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(a ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
__lowerCamelCase : Any = self.get_image_description(a )
if style_prompt is None:
if len(a ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
__lowerCamelCase : Tuple = self.get_image_description(a )
# get prompt text embeddings for content and style
__lowerCamelCase : int = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
__lowerCamelCase : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowerCamelCase : Union[str, Any] = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
__lowerCamelCase : Any = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowerCamelCase : List[Any] = slerp(a , a , a )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase : Any = text_embeddings.repeat_interleave(a , dim=0 )
# set timesteps
__lowerCamelCase : List[Any] = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowerCamelCase : Union[str, Any] = {}
if accepts_offset:
__lowerCamelCase : Dict = 1
self.scheduler.set_timesteps(a , **a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowerCamelCase , __lowerCamelCase : Dict = self.get_timesteps(a , a , self.device )
__lowerCamelCase : Tuple = timesteps[:1].repeat(a )
# Preprocess image
__lowerCamelCase : Any = preprocess(a , a , a )
__lowerCamelCase : str = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
__lowerCamelCase : Dict = preprocess(a , a , a )
__lowerCamelCase : Optional[int] = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
__lowerCamelCase : int = slerp(a , a , a )
if clip_guidance_scale > 0:
__lowerCamelCase : List[str] = self.get_clip_image_embeddings(a , a )
__lowerCamelCase : Union[str, Any] = self.get_clip_image_embeddings(a , a )
__lowerCamelCase : Union[str, Any] = slerp(
a , a , a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowerCamelCase : Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowerCamelCase : Optional[int] = content_text_input.input_ids.shape[-1]
__lowerCamelCase : int = self.tokenizer([''] , padding='max_length' , max_length=a , return_tensors='pt' )
__lowerCamelCase : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowerCamelCase : List[Any] = uncond_embeddings.repeat_interleave(a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase : int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowerCamelCase : str = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowerCamelCase : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowerCamelCase : Tuple = torch.randn(a , generator=a , device='cpu' , dtype=a ).to(
self.device )
else:
__lowerCamelCase : List[Any] = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__lowerCamelCase : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowerCamelCase : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowerCamelCase : int = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowerCamelCase : Dict = {}
if accepts_eta:
__lowerCamelCase : List[str] = eta
# check if the scheduler accepts generator
__lowerCamelCase : Optional[int] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowerCamelCase : Optional[Any] = generator
with self.progress_bar(total=a ):
for i, t in enumerate(a ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase : Union[str, Any] = self.scheduler.scale_model_input(a , a )
# predict the noise residual
__lowerCamelCase : Tuple = self.unet(a , a , encoder_hidden_states=a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase : str = noise_pred.chunk(2 )
__lowerCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowerCamelCase : str = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowerCamelCase , __lowerCamelCase : int = self.cond_fn(
a , a , a , a , a , a , a , )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase : Tuple = self.scheduler.step(a , a , a , **a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowerCamelCase : List[Any] = 1 / 0.1_8_2_1_5 * latents
__lowerCamelCase : Union[str, Any] = self.vae.decode(a ).sample
__lowerCamelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase : Union[str, Any] = self.numpy_to_pil(a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase ):
    def _create_dummy_dataset(self ) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
def SCREAMING_SNAKE_CASE__ ( self ):
import faiss
a :Dataset = self._create_dummy_dataset()
a :str = dset.map(
lambda _lowerCamelCase , _lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_lowerCamelCase , keep_in_memory=_lowerCamelCase )
a :List[Any] = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
a , a :List[Any] = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def SCREAMING_SNAKE_CASE__ ( self ):
import faiss
a :Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
a , a :List[Any] = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def SCREAMING_SNAKE_CASE__ ( self ):
import faiss
a :Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_lowerCamelCase ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
a , a :List[str] = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(_lowerCamelCase , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE__ ( self ):
from elasticsearch import Elasticsearch
a :Dataset = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
a :Tuple = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
a :List[str] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
a :Tuple = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=_lowerCamelCase )
a , a :Dict = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class FaissIndexTest(TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
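        # Worked check of the expectation above: the index stores the rows of np.eye(5) (ids 0-4)
        # followed by five zero vectors (ids 5-9). Each reversed-identity query is one-hot, so under
        # METRIC_INNER_PRODUCT it scores 1.0 only against its matching basis vector, which yields
        # best indices [4, 3, 2, 1, 0] and strictly positive best scores.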
def SCREAMING_SNAKE_CASE__ ( self ):
import faiss
        index = FaissIndex(string_factory='''Flat''' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='''LSH''' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            _ = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE__ ( self ):
import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE__ ( self ):
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def __lowerCamelCase ( mockfs ):
    """simple docstring"""
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = '''index.faiss'''
    path = F'''mock://{index_name}'''
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _snake_case ):
def SCREAMING_SNAKE_CASE__ ( self ):
        from elasticsearch import Elasticsearch
        with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
            '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'''acknowledged''': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(['''foo''', '''bar''', '''foobar'''] )
            # single query
            query = '''foo'''
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = '''foo'''
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            scores , indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['''foo''', '''bar''', '''foobar''']
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['''foo''', '''bar''', '''foobar''']
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
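            # For reference, a sketch of the same calls against a live (non-mocked) Elasticsearch;
            # the host URL and index name are illustrative assumptions, not part of this test:
            #     es_client = Elasticsearch("http://localhost:9200")
            #     index = ElasticSearchIndex(es_client=es_client, es_index_name="docs")
            #     index.add_documents(["foo", "bar", "foobar"])
            #     scores, indices = index.search("foo")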
| 94
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : Tuple = 3
_UpperCAmelCase : Any = (3_2, 3_2)
_UpperCAmelCase : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase__ , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
return CLIPTextModel(lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Any = self.dummy_cond_unet_upscale
_UpperCAmelCase : Union[str, Any] = DDPMScheduler()
_UpperCAmelCase : str = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : List[Any] = self.dummy_text_encoder
_UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _UpperCAmelCase : int = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : str = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=lowerCAmelCase__ , )[0]
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCAmelCase : Optional[Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Optional[Any] = self.dummy_cond_unet_upscale
_UpperCAmelCase : Tuple = DDPMScheduler()
_UpperCAmelCase : Dict = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : str = self.dummy_vae
_UpperCAmelCase : Optional[Any] = self.dummy_text_encoder
_UpperCAmelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _UpperCAmelCase : List[str] = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : Any = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : int = output.images
assert image.shape[0] == 2
_UpperCAmelCase : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Any = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Any = self.dummy_cond_unet_upscale
_UpperCAmelCase : Any = DDPMScheduler()
_UpperCAmelCase : Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : List[Any] = self.dummy_text_encoder
_UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _UpperCAmelCase : Optional[int] = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
_UpperCAmelCase : Tuple = unet.half()
_UpperCAmelCase : Dict = text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Dict = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ).images
_UpperCAmelCase : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_UpperCAmelCase : Tuple = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : str = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Union[str, Any] = "a cat sitting on a park bench"
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
_UpperCAmelCase : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_UpperCAmelCase : Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
            lowerCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Dict = "a cat sitting on a park bench"
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
_UpperCAmelCase : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : int = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : Any = StableDiffusionUpscalePipeline.from_pretrained(
            lowerCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase : Tuple = "a cat sitting on a park bench"
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , output_type="np" , )
_UpperCAmelCase : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
| 145
| 0
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :List[str] ,__snake_case :Any=13 ,__snake_case :Dict=30 ,__snake_case :Dict=2 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=True ,__snake_case :Any=True ,__snake_case :Dict=32 ,__snake_case :str=2 ,__snake_case :List[Any]=4 ,__snake_case :Tuple=37 ,__snake_case :Dict="gelu" ,__snake_case :Optional[Any]=0.1 ,__snake_case :List[Any]=0.1 ,__snake_case :Tuple=10 ,__snake_case :Dict=0.02 ,__snake_case :Union[str, Any]=3 ,__snake_case :List[str]=0.6 ,__snake_case :Tuple=None ,) -> Optional[Any]:
a__ = parent
a__ = batch_size
a__ = image_size
a__ = patch_size
a__ = num_channels
a__ = is_training
a__ = use_labels
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = type_sequence_label_size
a__ = initializer_range
a__ = mask_ratio
a__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
a__ = (image_size // patch_size) ** 2
a__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
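        # Worked example with the defaults above: image_size=30 and patch_size=2 give
        # num_patches = (30 // 2) ** 2 = 225, and mask_ratio=0.6 leaves
        # seq_length = ceil(0.4 * (225 + 1)) = ceil(90.4) = 91 visible tokens.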
def lowerCamelCase__( self :Tuple ) -> List[str]:
a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__( self :int ) -> str:
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__snake_case ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def lowerCamelCase__( self :Tuple ,__snake_case :List[str] ,__snake_case :List[str] ,__snake_case :Dict ) -> List[Any]:
a__ = TFViTMAEModel(config=__snake_case )
a__ = model(__snake_case ,training=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :Tuple ,__snake_case :Dict ,__snake_case :Any ,__snake_case :str ) -> Any:
a__ = TFViTMAEForPreTraining(__snake_case )
a__ = model(__snake_case ,training=__snake_case )
# expected sequence length = num_patches
a__ = (self.image_size // self.patch_size) ** 2
a__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
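        # Worked example with the defaults above: num_patches = (30 // 2) ** 2 = 225 and
        # expected_num_channels = 2 ** 2 * 3 = 12, so the RGB logits are checked against
        # (batch_size, 225, 12) and the greyscale logits below against (batch_size, 225, 4).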
# test greyscale images
a__ = 1
a__ = TFViTMAEForPreTraining(__snake_case )
a__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a__ = model(__snake_case ,training=__snake_case )
a__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase__( self :List[Any] ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCAmelCase__ : str = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :List[str] ) -> int:
a__ = TFViTMAEModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,has_text_modality=__snake_case ,hidden_size=37 )
def lowerCamelCase__( self :str ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def lowerCamelCase__( self :str ) -> int:
pass
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
a__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case ,tf.keras.layers.Layer ) )
def lowerCamelCase__( self :Union[str, Any] ) -> str:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
a__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
# make the mask reproducible
np.random.seed(2 )
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = int((config.image_size // config.patch_size) ** 2 )
a__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
a__ = self._prepare_for_class(__snake_case ,__snake_case )
a__ = model(__snake_case ,noise=__snake_case )
a__ = copy.deepcopy(self._prepare_for_class(__snake_case ,__snake_case ) )
a__ = model(**__snake_case ,noise=__snake_case )
a__ = outputs_dict[0].numpy()
a__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 )
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
# make the mask reproducible
np.random.seed(2 )
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = int((config.image_size // config.patch_size) ** 2 )
a__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__snake_case :Tuple ):
a__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__snake_case ):
a__ = v.numpy()
else:
a__ = np.array(__snake_case )
return inputs_np_dict
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
a__ = self._prepare_for_class(__snake_case ,__snake_case )
a__ = prepare_numpy_arrays(__snake_case )
a__ = model(__snake_case ,noise=__snake_case )
a__ = model(**__snake_case ,noise=__snake_case )
self.assert_outputs_same(__snake_case ,__snake_case )
def lowerCamelCase__( self :Any ,__snake_case :Optional[Any] ,__snake_case :Optional[Any] ,__snake_case :List[str] ) -> List[str]:
# make masks reproducible
np.random.seed(2 )
a__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
a__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
a__ = tf.constant(__snake_case )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
a__ = tf_noise
super().check_pt_tf_models(__snake_case ,__snake_case ,__snake_case )
def lowerCamelCase__( self :Any ) -> Optional[int]:
# make mask reproducible
np.random.seed(2 )
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__snake_case )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(__snake_case ,__snake_case ),)
if isinstance(__snake_case ,__snake_case )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__snake_case ,'_keras_serializable' ,__snake_case )
}
a__ = int((config.image_size // config.patch_size) ** 2 )
a__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
a__ = tf.convert_to_tensor(__snake_case )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
a__ = main_layer_class(__snake_case )
a__ = {
name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
a__ = tf.keras.Model(__snake_case ,outputs=main_layer(__snake_case ) )
a__ = model(__snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__snake_case ,'keras_model.h5' )
model.save(__snake_case )
a__ = tf.keras.models.load_model(
__snake_case ,custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__snake_case ,tf.keras.Model )
a__ = model(__snake_case )
self.assert_outputs_same(__snake_case ,__snake_case )
@slow
def lowerCamelCase__( self :str ) -> int:
# make mask reproducible
np.random.seed(2 )
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = int((config.image_size // config.patch_size) ** 2 )
a__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
a__ = self._prepare_for_class(__snake_case ,__snake_case )
a__ = model(__snake_case ,noise=__snake_case )
if model_class.__name__ == "TFViTMAEModel":
a__ = outputs.last_hidden_state.numpy()
a__ = 0
else:
a__ = outputs.logits.numpy()
a__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__snake_case ,saved_model=__snake_case )
a__ = model_class.from_pretrained(__snake_case )
a__ = model(__snake_case ,noise=__snake_case )
if model_class.__name__ == "TFViTMAEModel":
a__ = after_outputs['last_hidden_state'].numpy()
a__ = 0
else:
a__ = after_outputs['logits'].numpy()
a__ = 0
a__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__snake_case ,1E-5 )
def lowerCamelCase__( self :List[str] ) -> List[Any]:
# make mask reproducible
np.random.seed(2 )
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = int((config.image_size // config.patch_size) ** 2 )
a__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
a__ = self._prepare_for_class(__snake_case ,__snake_case )
a__ = model(__snake_case ,noise=__snake_case )
a__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__snake_case )
a__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
a__ = model_class.from_config(model.config )
a__ = new_model(__snake_case ) # Build model
new_model.set_weights(model.get_weights() )
a__ = new_model(__snake_case ,noise=__snake_case )
self.assert_outputs_same(__snake_case ,__snake_case )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def lowerCamelCase__( self :Optional[Any] ) -> Dict:
pass
@slow
def lowerCamelCase__( self :Tuple ) -> List[str]:
        a__ = TFViTMAEModel.from_pretrained('facebook/vit-mae-base' )
self.assertIsNotNone(__snake_case )
def __lowercase ( ):
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class snake_case_ (unittest.TestCase ):
@cached_property
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def lowerCamelCase__( self :List[str] ) -> str:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
a__ = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(images=__snake_case ,return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
a__ = ViTMAEConfig()
a__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
a__ = np.random.uniform(size=(1, num_patches) )
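        # Worked example: facebook/vit-mae-base uses image_size=224 and patch_size=16, so
        # num_patches = (224 // 16) ** 2 = 196 and the noise has shape (1, 196), consistent
        # with the (1, 196, 768) logits shape asserted below.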
# forward pass
a__ = model(**__snake_case ,noise=__snake_case )
# verify the logits
a__ = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape ,__snake_case )
a__ = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] ,__snake_case ,atol=1E-4 )
| 109
|
from __future__ import annotations
class Node:
    def __init__( self , data: int ) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display( tree: Node | None ): # In Order traversal of the tree
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree( tree: Node | None ):
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree: Node ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main( ): # Main function for testing.
    # build a small full binary tree for the checks below
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.left.right.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('Tree is: ' )
    display(tree )
if __name__ == "__main__":
main()
| 109
| 1
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase ( _lowercase , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
lowerCamelCase : Any = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs(self , seed=0 ):
        image = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : str = self.get_dummy_inputs()
lowerCamelCase_ : Dict = pipe(**A ).images
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : int = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCamelCase_ : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Any = self.get_dummy_inputs()
lowerCamelCase_ : Union[str, Any] = pipe(**A ).images
lowerCamelCase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : List[Any] = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCamelCase_ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : int = self.get_dummy_inputs()
lowerCamelCase_ : Optional[int] = pipe(**A ).images
lowerCamelCase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : str = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCamelCase_ : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : int = self.get_dummy_inputs()
lowerCamelCase_ : Dict = pipe(**A ).images
lowerCamelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : Union[str, Any] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCamelCase_ : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : List[str] = self.get_dummy_inputs()
lowerCamelCase_ : Any = pipe(**A ).images
lowerCamelCase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : Tuple = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
@property
def UpperCAmelCase__ (self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ (self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : Tuple = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
lowerCamelCase_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[str] = torch.manual_seed(0 )
lowerCamelCase_ : List[str] = pipe(
prompt=A , image=A , guidance_scale=7.5 , num_inference_steps=1_0 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Any = output.images
lowerCamelCase_ : Optional[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : Tuple = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : Dict = init_image.resize((1_2_8, 1_2_8) )
lowerCamelCase_ : int = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
lowerCamelCase_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Any = pipe(
prompt=A , image=A , guidance_scale=7.5 , num_inference_steps=2_0 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Any = output.images
lowerCamelCase_ : Union[str, Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : Union[str, Any] = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 318
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer( nn.Module ):
    def __init__(self , module , rank ):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward(self , input , *args , **kwargs ):
        return self.module(input , *args , **kwargs ) + self.adapter(input )
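# A minimal usage sketch of the adapter wrapper above (the layer sizes are illustrative
# assumptions, not part of the test suite): the wrapped call is module(x) + adapter(x),
# so freezing the base layer leaves only the low-rank path trainable.
#     base = nn.Linear(16, 16)
#     base.weight.requires_grad_(False)
#     lora = LoRALayer(base, rank=4)
#     out = lora(torch.randn(2, 16))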
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.float16 , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32 )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318
| 1
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok , src_examples , tgt_examples , max_tokens=1_0_2_4 ) -> Union[str, Any]:
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]
    def is_too_big(text ):
        return tok(text , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ): # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else: # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
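# A minimal usage sketch of the packing above, assuming any seq2seq tokenizer (names illustrative):
#     tok = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
#     packed_src, packed_tgt = pack_examples(tok, ["a b", "c d", "e f"], ["x", "y", "z"], max_tokens=1_0_2_4)
#     # adjacent (src, tgt) pairs are concatenated with spaces until one more pair would exceed max_tokens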
def pack_data_dir(tok , data_dir , max_tokens , save_path ) -> Any:
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / F"""{split}.source""" ).open('''w''' ).write('''\n'''.join(packed_src ) )
        Path(save_path / F"""{split}.target""" ).open('''w''' ).write('''\n'''.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        shutil.copyfile(src_path , save_path / F"""{split}.source""" )
        shutil.copyfile(tgt_path , save_path / F"""{split}.target""" )
def packer_cli( ) -> List[str]:
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''--max_seq_len''' , type=int , default=1_2_8 )
    parser.add_argument('''--data_dir''' , type=str )
    parser.add_argument('''--save_path''' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 360
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector ) -> np.ndarray:
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 334
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text(text , n=100 , character=" " ) -> List[str]:
    """simple docstring"""
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
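# Worked example: split_text("a b c d e", n=2) splits on the default " " character and regroups
# every 2 tokens, yielding ["a b", "c d", "e"]; the default n=100 chunks documents into
# passages of 100 whitespace-separated words.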
def split_documents(documents ) -> dict:
    """simple docstring"""
    titles , texts = [], []
    for title, text in zip(documents['''title'''] , documents['''text'''] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '''''' )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed(documents , ctx_encoder , ctx_tokenizer ) -> dict:
    """simple docstring"""
    input_ids = ctx_tokenizer(
        documents['''title'''] , documents['''text'''] , truncation=True , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _SCREAMING_SNAKE_CASE (A , A , A , ) -> List[str]:
"""simple docstring"""
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
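    # --- Illustrative retrieval sketch (not part of the original script; the model
    # names below are assumptions): embed a query with the matching DPR *question*
    # encoder, then search the saved index with `get_nearest_examples`:
    #
    # from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
    # q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
    # q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")
    # question_emb = q_encoder(**q_tokenizer(rag_example_args.question or "test", return_tensors="pt"))[0][0].numpy()
    # scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb, k=5)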
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 2
|
"""simple docstring"""
def longest_distance(graph: dict) -> None:
    """Kahn's algorithm on a DAG: repeatedly remove zero-indegree vertices and
    relax the longest-path estimate of each successor."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
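# Note (illustrative, not in the original): with every distance initialised to 1,
# the printed value is the number of vertices on the longest path in the DAG, e.g.
#   longest_distance({0: [1], 1: []})  # prints 2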
| 224
| 0
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and save it back."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
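# Example invocation (illustrative; the script filename is an assumption). `fire`
# maps the function's parameters onto CLI arguments and flags:
#   python fp16_conversion.py pytorch_model.bin --save_path pytorch_model.fp16.bin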
| 236
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 236
| 1
|
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Check whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient v* A v / v* v of a Hermitian matrix ``a``."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
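# Property check (illustrative sketch, not in the original module): for a
# Hermitian matrix the Rayleigh quotient always lies between the smallest and
# largest eigenvalue, which np.linalg.eigvalsh can confirm (take the real part
# for complex inputs):
#   lambdas = np.linalg.eigvalsh(a)
#   assert lambdas.min() <= rayleigh_quotient(a, v).real <= lambdas.max()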
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 25
|
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic (sigmoid) activation."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy loss."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of the data under the current weights."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit the weights by batch gradient descent on the cross-entropy loss."""
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")

    plt.legend()
    plt.show()
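    # Sanity check (illustrative sketch, not part of the original script):
    # scikit-learn's LogisticRegression with a very weak penalty (C=1e5 is an
    # assumption standing in for "no regularization") should find a similar
    # decision boundary:
    #   from sklearn.linear_model import LogisticRegression
    #   clf = LogisticRegression(C=1e5).fit(x, y)
    #   print("sklearn theta:", clf.coef_, clf.intercept_)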
| 194
| 0
|
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the character order using the zigzag rail-fence pattern."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Generate the template zigzag, fill it with the ciphertext, then read off."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Use the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
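# Round-trip check (illustrative, not in the original module):
#   assert decrypt(encrypt("Hello, World!", 3), 3) == "Hello, World!"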
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """A ControlNet model implemented in Flax."""

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i], dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection, dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
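# Minimal usage sketch (illustrative, not part of the original module; it assumes
# the default configuration, which allocates the full-size parameter set):
#   import jax
#   controlnet = FlaxControlNetModel()
#   params = controlnet.init_weights(jax.random.PRNGKey(0))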
| 257
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
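# Quick illustration (not part of the original file): with the default 4-stage
# depths and embed_dim=96, the derived hidden size is 96 * 2**3 == 768:
#   config = SwinConfig()
#   assert config.hidden_size == 768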
| 215
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 108
| 0
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class Queue(Generic[_T]):
    """FIFO queue built from two LIFO stacks; each element moves between the
    stacks at most once, so operations are amortized O(1)."""

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # To reduce number of attribute look-ups in `while` loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()
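# Illustrative usage (not in the original module). Each element crosses from
# _stack1 to _stack2 at most once, which is why `get` is amortized O(1):
#   q = Queue([1, 2, 3])
#   q.put(4)
#   assert q.get() == 1 and len(q) == 3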
if __name__ == "__main__":
from doctest import testmod
testmod()
| 289
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
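    # (Illustrative note, not in the original file: the _LazyModule indirection
    # defers importing the heavy torch-backed modules until an attribute such as
    # FalconModel is first accessed.)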
| 289
| 1
|
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! computed recursively; lru_cache memoizes intermediate results."""
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
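# Note (illustrative, not in the original): thanks to lru_cache, a later call
# reuses earlier results, e.g. factorial(10) after factorial(12) is a cache hit.
#   assert factorial(5) == 120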
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values: 1 if at least one input is 1."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 334
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected height and width of the processed images."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 351
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        for failed_file in all_files2failed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""
                payload = {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel="#accelerate-ci-daily",
                    thread_ts=ts,
                    blocks=[payload],
                )
| 307
| 0
|
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
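# Typical invocations (illustrative): run `python check_build.py` against the
# built sdist under build/lib/transformers, or `python check_build.py --check_lib`
# to check the installed package instead.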
| 236
|
def join(separator: str, separated: list[str]) -> str:
    """Concatenate the strings in ``separated`` with ``separator`` between them."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
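# Examples (illustrative):
#   join("-", ["a", "b", "c"])   -> "a-b-c"
#   join(" ", ["You", "are", "amazing!"])  -> "You are amazing!"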
if __name__ == "__main__":
from doctest import testmod
testmod()
| 236
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self : Any ):
"""simple docstring"""
class __snake_case ( lowerCamelCase_ ):
lowerCAmelCase_ = True
try:
AutoConfig.register("""custom""" , _lowercase )
AutoFeatureExtractor.register(_lowercase , _lowercase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowercase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_lowercase )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(_lowercase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
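The pattern the two tests above exercise, in isolation — a minimal sketch; CustomConfig and CustomFeatureExtractor are the user-defined pair the test file declares elsewhere, so treat them as placeholders here:
from transformers import AutoConfig, AutoFeatureExtractor

# Map a new model_type string to a config class, then map that config class
# to a feature extractor class; the auto-API can now resolve both.
AutoConfig.register("custom", CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)

# Any folder whose config.json carries model_type "custom" now loads the
# custom extractor through the generic entry point (path is illustrative).
feature_extractor = AutoFeatureExtractor.from_pretrained("path/to/saved/custom/model")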
| 204
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __snake_case ( unittest.TestCase ):
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
SCREAMING_SNAKE_CASE__ = Vector()
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_lowercase ) , """(0,0,0,0,0,1)""" )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3, 4] )
self.assertEqual(len(_lowercase ) , 4 )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2] )
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3, 4, 5] )
SCREAMING_SNAKE_CASE__ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
SCREAMING_SNAKE_CASE__ = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE__ = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE__ = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE__ = Vector([2, -1, 4] ) # for test of dot product
SCREAMING_SNAKE_CASE__ = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __a ( self : str ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE__ = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _lowercase , _lowercase ) ) , """(3,4,7)""" )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 0, 0, 0, 0, 0] )
SCREAMING_SNAKE_CASE__ = x.copy()
self.assertEqual(str(_lowercase ) , str(_lowercase ) )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_lowercase ) , """(0,1,0)""" )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(_lowercase ) )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE__ = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_lowercase , _lowercase ) )
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE__ = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_lowercase , _lowercase ) )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(_lowercase ) )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __a ( self : Any ):
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 204
| 1
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = filter(lambda snake_case__ : p.requires_grad , model.parameters() )
_snake_case : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
A_ = logging.getLogger(__name__)
def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
if metric == "rouge2":
_snake_case : Union[str, Any] = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
_snake_case : Union[str, Any] = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
_snake_case : Tuple = """{val_avg_em:.4f}-{step_count}"""
elif metric == "loss":
_snake_case : List[Any] = """{val_avg_loss:.4f}-{step_count}"""
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
""" function.""" )
_snake_case : Union[str, Any] = ModelCheckpoint(
dirpath=a__ , filename=a__ , monitor=F"val_{metric}" , mode="""max""" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Any ):
"""simple docstring"""
return EarlyStopping(
monitor=F"val_{metric}" , mode="""min""" if """loss""" in metric else """max""" , patience=a__ , verbose=a__ , )
class lowercase( pl.Callback ):
'''simple docstring'''
def UpperCamelCase_ ( self: str, a_: int, a_: int ):
'''simple docstring'''
_snake_case : List[Any] = {f"lr_group_{i}": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def UpperCamelCase_ ( self: Any, a_: Tuple, a_: Union[str, Any], a_: Tuple, a_: List[Any]=True ):
'''simple docstring'''
logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" )
_snake_case : str = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
_snake_case : Union[str, Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
_snake_case : List[str] = od / """test_results.txt"""
_snake_case : List[str] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_snake_case : str = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
_snake_case : Union[str, Any] = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A, """a+""" ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
_snake_case : Dict = metrics[key]
if isinstance(_A, torch.Tensor ):
_snake_case : Any = val.item()
_snake_case : Union[str, Any] = f"{key}: {val:.6f}\n"
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
_snake_case : int = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_A )
@rank_zero_only
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[int], a_: Tuple ):
'''simple docstring'''
try:
_snake_case : List[Any] = pl_module.model.model.num_parameters()
except AttributeError:
_snake_case : List[str] = pl_module.model.num_parameters()
_snake_case : Optional[Any] = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCamelCase_ ( self: Optional[int], a_: Optional[Any], a_: Optional[int] ):
'''simple docstring'''
save_json(pl_module.metrics, pl_module.metrics_save_path )
return self._write_logs(_A, _A, """test""" )
@rank_zero_only
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Optional[Any] ):
'''simple docstring'''
save_json(pl_module.metrics, pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 64
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowercase ( a__ ) -> Tuple:
__SCREAMING_SNAKE_CASE = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def __lowercase ( a__ ) -> int:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = emb.weight.shape
__SCREAMING_SNAKE_CASE = nn.Linear(a__ , a__ , bias=a__ )
__SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def __lowercase ( a__ ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = torch.load(a__ , map_location='cpu' )
__SCREAMING_SNAKE_CASE = mam_aaa['args'] or mam_aaa['cfg']['model']
__SCREAMING_SNAKE_CASE = mam_aaa['model']
remove_ignore_keys_(a__ )
__SCREAMING_SNAKE_CASE = state_dict['encoder.embed_tokens.weight'].shape[0]
__SCREAMING_SNAKE_CASE = MaMaaaConfig(
vocab_size=a__ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
__SCREAMING_SNAKE_CASE = state_dict['decoder.embed_tokens.weight']
__SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(a__ )
model.model.load_state_dict(a__ , strict=a__ )
__SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase__ : Optional[int] =parser.parse_args()
    lowerCAmelCase__ : Tuple =convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 257
| 0
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( __lowercase , __lowercase):
@register_to_config
def __init__( self: Tuple , *,
_lowerCAmelCase: int = 4 , _lowerCAmelCase: int = 7_68 , _lowerCAmelCase: int , _lowerCAmelCase: Dict , ):
super().__init__()
lowercase :Optional[int] = nn.Parameter(torch.zeros(UpperCAmelCase__ ) )
# parameters for additional clip time embeddings
lowercase :int = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase :Any = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
# parameters for encoder hidden states
lowercase :Tuple = clip_extra_context_tokens
lowercase :Any = nn.Linear(
UpperCAmelCase__ , self.clip_extra_context_tokens * cross_attention_dim )
lowercase :List[Any] = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase :Union[str, Any] = nn.LayerNorm(UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( self: int , *, _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Any , _lowerCAmelCase: str ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
lowercase :int = image_embeddings.shape[0]
lowercase :Optional[Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
lowercase :str = classifier_free_guidance_embeddings.expand(
UpperCAmelCase__ , -1 )
lowercase :Tuple = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
lowercase :int = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
lowercase :List[str] = self.embedding_proj(UpperCAmelCase__ )
lowercase :List[str] = self.clip_image_embeddings_project_to_time_embeddings(UpperCAmelCase__ )
lowercase :Optional[int] = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
lowercase :Optional[Any] = self.clip_extra_context_tokens_proj(UpperCAmelCase__ )
lowercase :List[str] = clip_extra_context_tokens.reshape(UpperCAmelCase__ , -1 , self.clip_extra_context_tokens )
lowercase :Tuple = clip_extra_context_tokens.permute(0 , 2 , 1 )
lowercase :Optional[int] = self.encoder_hidden_states_proj(UpperCAmelCase__ )
lowercase :Optional[int] = self.text_encoder_hidden_states_norm(UpperCAmelCase__ )
lowercase :int = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 361
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
_UpperCAmelCase : List[str] = None
try:
import msvcrt
except ImportError:
_UpperCAmelCase : Tuple = None
try:
import fcntl
except ImportError:
_UpperCAmelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
_UpperCAmelCase : Tuple = OSError
# Data
# ------------------------------------------------
_UpperCAmelCase : Optional[int] = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
_UpperCAmelCase : Optional[Any] = "3.0.12"
_UpperCAmelCase : int = None
def UpperCAmelCase__ ( ):
global _logger
lowercase :List[str] = _logger or logging.getLogger(__name__ )
return _logger
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: int , _lowerCAmelCase: Dict ):
lowercase :Any = lock_file
return None
def __str__( self: Dict ):
lowercase :str = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class __lowerCAmelCase :
def __init__( self: Tuple , _lowerCAmelCase: Any ):
lowercase :Optional[Any] = lock
return None
def __enter__( self: List[Any] ):
return self.lock
def __exit__( self: Dict , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[int] ):
self.lock.release()
return None
class __lowerCAmelCase :
def __init__( self: Optional[Any] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Tuple=-1 , _lowerCAmelCase: int=None ):
lowercase :Any = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
lowercase :int = self.hash_filename_if_too_long(_lowerCAmelCase , _lowerCAmelCase )
# The path to the lock file.
lowercase :List[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowercase :Any = None
# The default timeout value.
lowercase :Any = timeout
# We use this lock primarily for the lock counter.
lowercase :Optional[int] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowercase :Optional[int] = 0
return None
@property
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
return self._lock_file
@property
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
return self._timeout
@timeout.setter
def SCREAMING_SNAKE_CASE ( self: Tuple , _lowerCAmelCase: List[str] ):
lowercase :Tuple = float(_lowerCAmelCase )
return None
def SCREAMING_SNAKE_CASE ( self: int ):
raise NotImplementedError()
def SCREAMING_SNAKE_CASE ( self: int ):
raise NotImplementedError()
@property
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
return self._lock_file_fd is not None
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: List[Any]=None , _lowerCAmelCase: Union[str, Any]=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
lowercase :List[str] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowercase :Any = id(self )
lowercase :Optional[int] = self._lock_file
lowercase :Optional[Any] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(_lowerCAmelCase )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowercase :Union[str, Any] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def SCREAMING_SNAKE_CASE ( self: Tuple , _lowerCAmelCase: Tuple=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowercase :Union[str, Any] = id(self )
lowercase :str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
lowercase :List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self: Tuple ):
self.acquire()
return self
def __exit__( self: Union[str, Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Dict ):
self.release()
return None
def __del__( self: Optional[Any] ):
self.release(force=_lowerCAmelCase )
return None
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: str , _lowerCAmelCase: int ):
lowercase :Union[str, Any] = os.path.basename(_lowerCAmelCase )
if len(_lowerCAmelCase ) > max_length and max_length > 0:
lowercase :Dict = os.path.dirname(_lowerCAmelCase )
lowercase :Any = str(hash(_lowerCAmelCase ) )
lowercase :Union[str, Any] = filename[: max_length - len(_lowerCAmelCase ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(_lowerCAmelCase , _lowerCAmelCase )
else:
return path
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: int , _lowerCAmelCase: int , _lowerCAmelCase: Optional[Any]=-1 , _lowerCAmelCase: List[Any]=None ):
from .file_utils import relative_to_absolute_path
super().__init__(_lowerCAmelCase , timeout=_lowerCAmelCase , max_filename_length=_lowerCAmelCase )
lowercase :Optional[int] = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase :int = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
lowercase :Tuple = os.open(self._lock_file , _lowerCAmelCase )
except OSError:
pass
else:
try:
msvcrt.locking(_lowerCAmelCase , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(_lowerCAmelCase )
else:
lowercase :Any = fd
return None
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :Any = self._lock_file_fd
lowercase :Tuple = None
msvcrt.locking(_lowerCAmelCase , msvcrt.LK_UNLCK , 1 )
os.close(_lowerCAmelCase )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: str , _lowerCAmelCase: Tuple , _lowerCAmelCase: Dict=-1 , _lowerCAmelCase: Tuple=None ):
lowercase :List[str] = os.statvfs(os.path.dirname(_lowerCAmelCase ) ).f_namemax
super().__init__(_lowerCAmelCase , timeout=_lowerCAmelCase , max_filename_length=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase :Any = os.O_RDWR | os.O_CREAT | os.O_TRUNC
lowercase :Optional[int] = os.open(self._lock_file , _lowerCAmelCase )
try:
fcntl.flock(_lowerCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(_lowerCAmelCase )
else:
lowercase :Optional[Any] = fd
return None
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
lowercase :Dict = self._lock_file_fd
lowercase :Union[str, Any] = None
fcntl.flock(_lowerCAmelCase , fcntl.LOCK_UN )
os.close(_lowerCAmelCase )
return None
class __lowerCAmelCase ( lowerCAmelCase):
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
lowercase :str = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
lowercase :List[Any] = os.open(self._lock_file , _lowerCAmelCase )
except OSError:
pass
else:
lowercase :int = fd
return None
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
os.close(self._lock_file_fd )
lowercase :int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
_UpperCAmelCase : Tuple = None
if msvcrt:
_UpperCAmelCase : str = WindowsFileLock
elif fcntl:
_UpperCAmelCase : List[Any] = UnixFileLock
else:
_UpperCAmelCase : Optional[int] = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
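The module above is a renamed vendored copy of py-filelock (the version string "3.0.12" appears in its data block). With the upstream names restored, the intended use is a context manager; a minimal sketch, with an illustrative lock path:
from filelock import FileLock, Timeout  # upstream names for the classes defined above

lock = FileLock("shared_resource.txt.lock", timeout=5)
try:
    with lock:  # __enter__ calls acquire(); __exit__ calls release()
        # critical section: at most one process holds the lock here
        print("lock acquired")
except Timeout:
    # acquire() polled for 5 seconds and another holder never let go
    print("could not acquire the lock")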
| 158
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : Optional[int] = 'pix2struct_text_model'
_snake_case : Dict = ['past_key_values']
_snake_case : Union[str, Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , __lowerCAmelCase : List[Any]=5_0244 , __lowerCAmelCase : Dict=768 , __lowerCAmelCase : int=64 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : Dict=12 , __lowerCAmelCase : Dict=12 , __lowerCAmelCase : int=32 , __lowerCAmelCase : Optional[int]=128 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Union[str, Any]=1e-6 , __lowerCAmelCase : List[str]=1.0 , __lowerCAmelCase : int="gelu_new" , __lowerCAmelCase : List[str]=0 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : str=1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Union[str, Any] , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = d_kv
_UpperCAmelCase = d_ff
_UpperCAmelCase = num_layers
_UpperCAmelCase = num_heads
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = relative_attention_max_distance
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = use_cache
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = decoder_start_token_id
# for backwards compatibility
_UpperCAmelCase = dense_act_fn
super().__init__(
pad_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , is_decoder=__lowerCAmelCase , **__lowerCAmelCase , )
@classmethod
def lowerCAmelCase_ ( cls : Any , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : Union[str, Any] ):
cls._set_token_in_kwargs(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
_UpperCAmelCase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
class a ( lowerCAmelCase_ ):
_snake_case : Tuple = 'pix2struct_vision_model'
def __init__( self : Any , __lowerCAmelCase : List[str]=768 , __lowerCAmelCase : List[str]=768 , __lowerCAmelCase : int=2048 , __lowerCAmelCase : Optional[int]=64 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Optional[int]="gelu_new" , __lowerCAmelCase : Optional[int]=1e-6 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : Optional[Any]=1e-1_0 , __lowerCAmelCase : Optional[int]=1.0 , __lowerCAmelCase : Any=4096 , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Optional[int]=128 , **__lowerCAmelCase : List[Any] , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = patch_embed_hidden_size
_UpperCAmelCase = d_ff
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = initializer_range
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = dense_act_fn
_UpperCAmelCase = seq_len
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = relative_attention_max_distance
_UpperCAmelCase = d_kv
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : Optional[int] ):
cls._set_token_in_kwargs(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
_UpperCAmelCase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'pix2struct'
_snake_case : Any = True
def __init__( self : Optional[int] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Dict=1.0 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : int=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Any , ):
super().__init__(tie_word_embeddings=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase )
if text_config is None:
_UpperCAmelCase = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
_UpperCAmelCase = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
_UpperCAmelCase = PixaStructTextConfig(**__lowerCAmelCase )
_UpperCAmelCase = PixaStructVisionConfig(**__lowerCAmelCase )
_UpperCAmelCase = self.text_config.decoder_start_token_id
_UpperCAmelCase = self.text_config.pad_token_id
_UpperCAmelCase = self.text_config.eos_token_id
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = initializer_range
_UpperCAmelCase = self.initializer_range
_UpperCAmelCase = self.initializer_range
_UpperCAmelCase = is_vqa
@classmethod
def lowerCAmelCase_ ( cls : Any , __lowerCAmelCase : PixaStructTextConfig , __lowerCAmelCase : PixaStructVisionConfig , **__lowerCAmelCase : Dict ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.text_config.to_dict()
_UpperCAmelCase = self.vision_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
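A sketch of composing the three configs above, assuming the upstream class names Pix2StructTextConfig / Pix2StructVisionConfig / Pix2StructConfig (the digits were mangled to letters in this copy):
text_config = Pix2StructTextConfig()    # defaults mirror the __init__ signature above
vision_config = Pix2StructVisionConfig()
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
assert config.to_dict()["text_config"]["num_layers"] == 12  # nested serialization, as in to_dict above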
| 289
|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Markov chain stored as a node -> {successor: probability} mapping."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, nodea: str, nodeb: str, probability: float) -> None:
        if nodea not in self.connections:
            self.add_node(nodea)
        if nodeb not in self.connections:
            self.add_node(nodeb)
        self.connections[nodea][nodeb] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        # Walk the cumulative distribution of outgoing edges and pick the
        # destination whose cumulative probability first exceeds the draw.
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    """Run the chain for `steps` transitions and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodeb, probability in transitions:
        graph.add_transition_probability(nodea, nodeb, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
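A quick illustrative run of the driver above, with made-up transition probabilities (each node's outgoing probabilities sum to 1):
transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
counts = get_transitions("a", transitions, 5000)
print(counts)  # a Counter heavily weighted toward "a"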
| 289
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : torch.FloatTensor
class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , SCREAMING_SNAKE_CASE_ : int = 6_55_36 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : str = "fourier" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE_ : Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : Tuple[int] = (32, 32, 64) , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Tuple:
'''simple docstring'''
super().__init__()
A: Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
A: Tuple = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE_ , log=SCREAMING_SNAKE_CASE_ , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ )
A: List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
A: str = Timesteps(
block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ , downscale_freq_shift=SCREAMING_SNAKE_CASE_ )
A: Any = block_out_channels[0]
if use_timestep_embedding:
A: Optional[Any] = block_out_channels[0] * 4
A: List[Any] = TimestepEmbedding(
in_channels=SCREAMING_SNAKE_CASE_ , time_embed_dim=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , out_dim=block_out_channels[0] , )
A: Optional[Any] = nn.ModuleList([] )
A: str = None
A: str = nn.ModuleList([] )
A: Tuple = None
# down
A: Any = in_channels
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ):
A: Optional[int] = output_channel
A: List[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
A: List[Any] = i == len(SCREAMING_SNAKE_CASE_ ) - 1
A: Optional[int] = get_down_block(
SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(SCREAMING_SNAKE_CASE_ )
# mid
A: Union[str, Any] = get_mid_block(
SCREAMING_SNAKE_CASE_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE_ , add_downsample=SCREAMING_SNAKE_CASE_ , )
# up
A: Optional[Any] = list(reversed(SCREAMING_SNAKE_CASE_ ) )
A: List[str] = reversed_block_out_channels[0]
if out_block_type is None:
A: int = out_channels
else:
A: Union[str, Any] = block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ):
A: List[Any] = output_channel
A: int = (
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels
)
A: Optional[int] = i == len(SCREAMING_SNAKE_CASE_ ) - 1
A: Optional[Any] = get_up_block(
SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(SCREAMING_SNAKE_CASE_ )
A: Any = output_channel
# out
A: List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
A: Optional[int] = get_out_block(
out_block_type=SCREAMING_SNAKE_CASE_ , num_groups_out=SCREAMING_SNAKE_CASE_ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , fc_dim=block_out_channels[-1] // 4 , )
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[UNetaDOutput, Tuple]:
'''simple docstring'''
A: Any = timestep
if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ):
A: Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0:
A: List[str] = timesteps[None].to(sample.device )
A: int = self.time_proj(SCREAMING_SNAKE_CASE_ )
if self.config.use_timestep_embedding:
A: List[Any] = self.time_mlp(SCREAMING_SNAKE_CASE_ )
else:
A: str = timestep_embed[..., None]
A: Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
A: Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
A: List[str] = ()
for downsample_block in self.down_blocks:
            A , A = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
A: Dict = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
A: List[Any] = down_block_res_samples[-1:]
A: List[str] = down_block_res_samples[:-1]
A: Optional[int] = upsample_block(SCREAMING_SNAKE_CASE_ , res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ )
# 5. post-process
if self.out_block:
A: Any = self.out_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=SCREAMING_SNAKE_CASE_ )
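The forward pass above maps a noisy 1-D sample plus a timestep to a denoised sample; a sketch using the upstream diffusers class name UNet1DModel (assumed — the digit is mangled above):
import torch
from diffusers import UNet1DModel

model = UNet1DModel()                      # defaults: sample_size=65536, in/out channels = 2
sample = torch.randn(1, 2, 65536)          # (batch, channels, length)
timestep = torch.tensor(10)                # 0-dim tensors are broadcast to the batch above
denoised = model(sample, timestep).sample  # same shape as the input sample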
| 334
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCamelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 334
| 1