code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
random.seed(_SCREAMING_SNAKE_CASE )
np.random.seed(_SCREAMING_SNAKE_CASE )
torch.manual_seed(_SCREAMING_SNAKE_CASE )
torch.cuda.manual_seed_all(_SCREAMING_SNAKE_CASE )
# ^^ safe to call this function even if cuda is not available
class SCREAMING_SNAKE_CASE :
def __init__( self : str , a : Iterable[torch.nn.Parameter] , a : float = 0.9999 , a : float = 0.0 , a : int = 0 , a : bool = False , a : Union[float, int] = 1.0 , a : Union[float, int] = 2 / 3 , a : Optional[Any] = None , a : Dict[str, Any] = None , **a : Any , )-> Optional[int]:
"""simple docstring"""
if isinstance(a , torch.nn.Module ):
lowercase__ = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , a , standard_warn=a , )
lowercase__ = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
lowercase__ = True
if kwargs.get('max_value' , a ) is not None:
lowercase__ = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' , '1.0.0' , a , standard_warn=a )
lowercase__ = kwargs['max_value']
if kwargs.get('min_value' , a ) is not None:
lowercase__ = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' , '1.0.0' , a , standard_warn=a )
lowercase__ = kwargs['min_value']
lowercase__ = list(a )
lowercase__ = [p.clone().detach() for p in parameters]
if kwargs.get('device' , a ) is not None:
lowercase__ = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' , '1.0.0' , a , standard_warn=a )
self.to(device=kwargs['device'] )
lowercase__ = None
lowercase__ = decay
lowercase__ = min_decay
lowercase__ = update_after_step
lowercase__ = use_ema_warmup
lowercase__ = inv_gamma
lowercase__ = power
lowercase__ = 0
lowercase__ = None # set in `step()`
lowercase__ = model_cls
lowercase__ = model_config
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , a : Union[str, Any] , a : Dict )-> "EMAModel":
"""simple docstring"""
lowercase__ , lowercase__ = model_cls.load_config(a , return_unused_kwargs=a )
lowercase__ = model_cls.from_pretrained(a )
lowercase__ = cls(model.parameters() , model_cls=a , model_config=model.config )
ema_model.load_state_dict(a )
return ema_model
def SCREAMING_SNAKE_CASE_ ( self : Any , a : Any )-> Optional[int]:
"""simple docstring"""
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
lowercase__ = self.model_cls.from_config(self.model_config )
lowercase__ = self.state_dict()
state_dict.pop('shadow_params' , a )
model.register_to_config(**a )
self.copy_to(model.parameters() )
model.save_pretrained(a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : int )-> float:
"""simple docstring"""
lowercase__ = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
lowercase__ = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
lowercase__ = (1 + step) / (10 + step)
lowercase__ = min(a , self.decay )
# make sure decay is not smaller than min_decay
lowercase__ = max(a , self.min_decay )
return cur_decay_value
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : str , a : Iterable[torch.nn.Parameter] )-> Dict:
"""simple docstring"""
if isinstance(a , torch.nn.Module ):
lowercase__ = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , a , standard_warn=a , )
lowercase__ = parameters.parameters()
lowercase__ = list(a )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowercase__ = self.get_decay(self.optimization_step )
lowercase__ = decay
lowercase__ = 1 - decay
lowercase__ = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , a ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
lowercase__ = deepspeed.zero.GatheredParameters(a , modifier_rank=a )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Iterable[torch.nn.Parameter] )-> None:
"""simple docstring"""
lowercase__ = list(a )
for s_param, param in zip(self.shadow_params , a ):
param.data.copy_(s_param.to(param.device ).data )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None )-> None:
"""simple docstring"""
lowercase__ = [
p.to(device=a , dtype=a ) if p.is_floating_point() else p.to(device=a )
for p in self.shadow_params
]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> dict:
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Iterable[torch.nn.Parameter] )-> None:
"""simple docstring"""
lowercase__ = [param.detach().cpu().clone() for param in parameters]
def SCREAMING_SNAKE_CASE_ ( self : str , a : Iterable[torch.nn.Parameter] )-> None:
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params , a ):
param.data.copy_(c_param.data )
# Better memory-wise.
lowercase__ = None
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : dict )-> None:
"""simple docstring"""
lowercase__ = copy.deepcopy(a )
lowercase__ = state_dict.get('decay' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
lowercase__ = state_dict.get('min_decay' , self.min_decay )
if not isinstance(self.min_decay , a ):
raise ValueError('Invalid min_decay' )
lowercase__ = state_dict.get('optimization_step' , self.optimization_step )
if not isinstance(self.optimization_step , a ):
raise ValueError('Invalid optimization_step' )
lowercase__ = state_dict.get('update_after_step' , self.update_after_step )
if not isinstance(self.update_after_step , a ):
raise ValueError('Invalid update_after_step' )
lowercase__ = state_dict.get('use_ema_warmup' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , a ):
raise ValueError('Invalid use_ema_warmup' )
lowercase__ = state_dict.get('inv_gamma' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('Invalid inv_gamma' )
lowercase__ = state_dict.get('power' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('Invalid power' )
lowercase__ = state_dict.get('shadow_params' , a )
if shadow_params is not None:
lowercase__ = shadow_params
if not isinstance(self.shadow_params , a ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(a , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
| 235 |
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
lowercase__ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
lowercase__ = set()
return any(
node not in visited and depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for node in graph )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
visited.add(_SCREAMING_SNAKE_CASE )
rec_stk.add(_SCREAMING_SNAKE_CASE )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(_SCREAMING_SNAKE_CASE )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 235 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class UpperCamelCase__ :
"""simple docstring"""
UpperCAmelCase__ = BlenderbotSmallConfig
UpperCAmelCase__ = {}
UpperCAmelCase__ = 'gelu'
def __init__( self : Optional[Any] , __A : Optional[Any] , __A : Optional[Any]=1_3 , __A : List[str]=7 , __A : List[str]=True , __A : Tuple=False , __A : str=9_9 , __A : Union[str, Any]=3_2 , __A : str=2 , __A : Optional[Any]=4 , __A : Optional[int]=3_7 , __A : str=0.1 , __A : str=0.1 , __A : int=2_0 , __A : Any=2 , __A : str=1 , __A : Union[str, Any]=0 , ):
"""simple docstring"""
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = bos_token_id
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowercase = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowercase = prepare_blenderbot_small_inputs_dict(__A , __A , __A )
return config, inputs_dict
def snake_case ( self : int , __A : Tuple , __A : List[str] ):
"""simple docstring"""
_lowercase = TFBlenderbotSmallModel(config=__A ).get_decoder()
_lowercase = inputs_dict["input_ids"]
_lowercase = input_ids[:1, :]
_lowercase = inputs_dict["attention_mask"][:1, :]
_lowercase = inputs_dict["head_mask"]
_lowercase = 1
# first forward pass
_lowercase = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
_lowercase , _lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_lowercase = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowercase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowercase = model(__A , attention_mask=__A )[0]
_lowercase = model(__A , attention_mask=__A , past_key_values=__A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowercase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowercase = output_from_no_past[:, -3:, random_slice_idx]
_lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__A , __A , rtol=1e-3 )
def A__ ( A_ , A_ , A_ , A_=None , A_=None , A_=None , A_=None , A_=None , ) -> Optional[Any]:
if attention_mask is None:
_lowercase = tf.cast(tf.math.not_equal(A_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowercase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
UpperCAmelCase__ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__ = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
_lowercase = TFBlenderbotSmallModelTester(self )
_lowercase = ConfigTester(self , config_class=__A )
def snake_case ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self : Optional[int] ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_tokenizers
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
UpperCAmelCase__ = 'facebook/blenderbot_small-90M'
@cached_property
def snake_case ( self : str ):
"""simple docstring"""
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
_lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = self.tokenizer(self.src_text , return_tensors="tf" )
_lowercase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__A , )
_lowercase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__A )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 602 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ : List[Any] = logging.get_logger(__name__)
def A__ ( A_ ) -> List[str]:
_lowercase = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
_lowercase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , A_ )
if matches:
_lowercase = float(matches[1] )
_lowercase = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
_lowercase = 1_001
_lowercase = "imagenet-1k-id2label.json"
_lowercase = "huggingface/label-files"
_lowercase = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) )
_lowercase = {int(A_ ) + 1: v for k, v in idalabel.items()}
_lowercase = "background"
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
return config
def A__ ( ) -> str:
_lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowercase = Image.open(requests.get(A_ , stream=A_ ).raw )
return im
@torch.no_grad()
def A__ ( A_ , A_ , A_ , A_=False ) -> List[Any]:
_lowercase = get_mobilenet_va_config(A_ )
# Load 🤗 model
_lowercase = MobileNetVaForImageClassification(A_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(A_ , A_ , A_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
_lowercase = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
_lowercase = image_processor(images=prepare_img() , return_tensors="pt" )
_lowercase = model(**A_ )
_lowercase = outputs.logits
assert logits.shape == (1, 1_001)
if model_name == "mobilenet_v1_1.0_224":
_lowercase = torch.tensor([-4.1739, -1.1233, 3.1205] )
elif model_name == "mobilenet_v1_0.75_192":
_lowercase = torch.tensor([-3.9440, -2.3141, -0.3333] )
else:
_lowercase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , A_ , atol=1e-4 )
Path(A_ ).mkdir(exist_ok=A_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A_ )
if push_to_hub:
print("Pushing to the hub..." )
_lowercase = "google/" + model_name
image_processor.push_to_hub(A_ )
model.push_to_hub(A_ )
if __name__ == "__main__":
__magic_name__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__magic_name__ : Union[str, Any] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 602 | 1 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowercase =logging.get_logger(__name__)
class __magic_name__ ( _a ):
UpperCAmelCase =CLIPConfig
UpperCAmelCase =["CLIPEncoderLayer"]
def __init__( self , snake_case) -> str:
'''simple docstring'''
super().__init__(lowercase__)
_UpperCAmelCase : Dict =CLIPVisionModelWithProjection(config.vision_config)
_UpperCAmelCase : str =nn.Linear(config.vision_config.projection_dim , 1)
_UpperCAmelCase : Dict =nn.Linear(config.vision_config.projection_dim , 1)
@torch.no_grad()
def lowerCAmelCase ( self , snake_case , snake_case , snake_case=0.5 , snake_case=0.5) -> str:
'''simple docstring'''
_UpperCAmelCase : int =self.vision_model(lowercase__)[0]
_UpperCAmelCase : str =self.p_head(lowercase__)
_UpperCAmelCase : Union[str, Any] =nsfw_detected.flatten()
_UpperCAmelCase : Any =nsfw_detected > p_threshold
_UpperCAmelCase : str =nsfw_detected.tolist()
if any(lowercase__):
logger.warning(
'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.')
for idx, nsfw_detected_ in enumerate(lowercase__):
if nsfw_detected_:
_UpperCAmelCase : Optional[Any] =np.zeros(images[idx].shape)
_UpperCAmelCase : str =self.w_head(lowercase__)
_UpperCAmelCase : Dict =watermark_detected.flatten()
_UpperCAmelCase : List[str] =watermark_detected > w_threshold
_UpperCAmelCase : Tuple =watermark_detected.tolist()
if any(lowercase__):
logger.warning(
'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.')
for idx, watermark_detected_ in enumerate(lowercase__):
if watermark_detected_:
_UpperCAmelCase : Union[str, Any] =np.zeros(images[idx].shape)
return images, nsfw_detected, watermark_detected
| 446 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class A_ ( _a ):
'''simple docstring'''
a__ = CustomTokenizer
pass
| 303 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
A : Dict = logging.get_logger()
@dataclass
class A :
'''simple docstring'''
__lowerCamelCase : nn.Module
__lowerCamelCase : List[nn.Module] = field(default_factory=SCREAMING_SNAKE_CASE )
__lowerCamelCase : list = field(default_factory=SCREAMING_SNAKE_CASE )
def a_ ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tensor , __lowerCAmelCase : Tensor ) -> Union[str, Any]:
"""simple docstring"""
A__ = len(list(m.modules() ) ) == 1 or isinstance(__lowerCAmelCase , nn.Convad ) or isinstance(__lowerCAmelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(__lowerCAmelCase )
def __call__( self : Any , __lowerCAmelCase : Tensor ) -> List[Any]:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__lowerCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def a_ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return list(filter(lambda __lowerCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A :
'''simple docstring'''
__lowerCamelCase : nn.Module
__lowerCamelCase : nn.Module
__lowerCamelCase : int = 0
__lowerCamelCase : List = field(default_factory=SCREAMING_SNAKE_CASE )
__lowerCamelCase : List = field(default_factory=SCREAMING_SNAKE_CASE )
def __call__( self : Dict , __lowerCAmelCase : Tensor ) -> int:
"""simple docstring"""
A__ = Tracker(self.dest )(__lowerCAmelCase ).parametrized
A__ = Tracker(self.src )(__lowerCAmelCase ).parametrized
A__ = list(filter(lambda __lowerCAmelCase : type(__lowerCAmelCase ) not in self.src_skip , __lowerCAmelCase ) )
A__ = list(filter(lambda __lowerCAmelCase : type(__lowerCAmelCase ) not in self.dest_skip , __lowerCAmelCase ) )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise Exception(
f'Numbers of operations are different. Source module has {len(__lowerCAmelCase )} operations while'
f' destination module has {len(__lowerCAmelCase )}.' )
for dest_m, src_m in zip(__lowerCAmelCase , __lowerCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transfered from={src_m} to={dest_m}' )
def __lowerCamelCase ( __a :str , __a :ResNetConfig , __a :Path , __a :bool = True ) -> Union[str, Any]:
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
A__ = timm.create_model(__a , pretrained=__a ).eval()
A__ = ResNetForImageClassification(__a ).eval()
A__ = ModuleTransfer(src=__a , dest=__a )
A__ = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(__a )
assert torch.allclose(from_model(__a ) , our_model(__a ).logits ), "The model logits don't match the original one."
A__ = F'resnet{"-".join(name.split("resnet" ) )}'
print(__a )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=__a , )
# we can use the convnext one
A__ = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=__a , )
print(F'Pushed {checkpoint_name}' )
def __lowerCamelCase ( __a :Path , __a :str = None , __a :bool = True ) -> Union[str, Any]:
"""simple docstring"""
A__ = """imagenet-1k-id2label.json"""
A__ = 1_0_0_0
A__ = (1, num_labels)
A__ = """huggingface/label-files"""
A__ = num_labels
A__ = json.load(open(hf_hub_download(__a , __a , repo_type="""dataset""" ) , """r""" ) )
A__ = {int(__a ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = partial(__a , num_labels=__a , idalabel=__a , labelaid=__a )
A__ = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(__a , names_to_config[model_name] , __a , __a )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__a , __a , __a , __a )
return config, expected_shape
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
A : Tuple = parser.parse_args()
A : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 247 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Union[str, Any] = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 247 | 1 |
"""Solve a system of n linear simultaneous equations by Gaussian elimination
with back-substitution.  Each equation is a list of n coefficients followed by
the constant term, e.g. ``2x + y = 4`` is written ``[2, 1, 4]``.

The generated (obfuscated) version of this file referenced the names
``simplify``, ``solve_simultaneous`` and ``eq`` that were never defined; the
original names are restored here.
"""


def simplify(current_set: list) -> list:
    """Run one round of forward elimination on ``current_set``.

    Each row is normalised by its leading coefficient, the first row is
    subtracted from every other row to cancel their leading terms, and the
    procedure recurses on the reduced sub-matrix until rows have length 3.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> leading 1s.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract the first row from every other row to cancel its leading term.
    first_row = duplicate_set[0]
    final_set = [first_row]
    for row in duplicate_set[1::]:
        temp_row = []
        # If first term is 0, it is already in the form we want, so preserve it.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Recurse on the sub-matrix (drop first row and column) until width is 3.
    if len(final_set[0]) != 3:
        kept_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, kept_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list) -> list:
    """Return the solution vector for ``equations`` (n rows of length n+1).

    :raises IndexError: if the input is empty or rows have the wrong length
    :raises ValueError: if a coefficient is not numeric, or every row
        contains a zero (no usable pivot row)

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                # Move the first all-non-zero row to the front so the
                # elimination never divides by a zero pivot.
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    # Back-substitute from the most reduced row upwards.
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            # Last variable: solve directly from the single-variable row.
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        # Subtract every already-solved variable's contribution.
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


# Backwards-compatible alias for the file's previous (generated) name.
_lowercase = solve_simultaneous

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    _UpperCamelCase = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(_UpperCamelCase))
    print(solve_simultaneous([[4, 2]]))
| 111 |
"""Print a diamond (pyramid plus inverted pyramid) pattern of stars."""


# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    """Print the upper half of the diamond: ``n`` rows of left-padded stars.

    :param n: number of rows
    """
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond: rows shrink from ``n`` stars to 1.

    :param n: number of rows
    """
    for i in range(n, 0, -1):
        # BUGFIX: the generated code iterated ``range(n, 0, -1)`` here, which
        # printed n stars on every row instead of i.
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the full diamond, or a placeholder message when ``n`` <= 0."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


# Backwards-compatible alias for the file's previous (generated) name.
_lowercase = pretty_print

if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 111 | 1 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Fractional knapsack: maximise value packed into capacity ``w``.

    Items are taken greedily in order of value density (value / weight);
    at most one item is taken fractionally.

    :param vl: list of item values
    :param wt: list of item weights
    :param w: knapsack capacity
    :param n: number of items
    :return: best achievable value (0 if nothing fits)

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # Sort items by value density, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    # acc[i] = total weight of the i+1 densest items.
    acc = list(accumulate(wt))
    # k = number of whole items that fit in the capacity.
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


# Backwards-compatible alias for the file's previous (generated) name.
__UpperCAmelCase = frac_knapsack

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 712 |
import qiskit
def half_adder(bita: int, bita_: int) -> qiskit.result.counts.Counts:
    """Simulate a quantum half adder for two classical input bits.

    Qubits 0/1 hold the inputs, qubit 2 receives their XOR (sum bit) and
    qubit 3 their AND (carry bit).

    :param bita: first input bit (0 or 1)
    :param bita_: second input bit (0 or 1)
    :return: measurement histogram; keys are "<carry><sum>" bit strings
    """
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bita_ == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


# Backwards-compatible alias for the file's previous (generated) name.
__UpperCAmelCase = half_adder

if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f'''Half Adder Output Qubit Counts: {counts}''')
| 578 | 0 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowercase(seed: int) -> None:
    """Seed every relevant RNG (python ``random``, numpy, torch CPU and CUDA)
    so a training run is reproducible.

    :param seed: the seed to apply to all random number generators
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class _UpperCAmelCase:
    """
    Exponential Moving Average (EMA) of model parameters.

    Keeps a shadow copy of a model's parameters that is updated after every
    optimization step as ``shadow = decay * shadow + (1 - decay) * param``.

    NOTE: the generated version of this class had every parameter named
    ``__UpperCAmelCase`` (a SyntaxError) and every method named
    ``lowerCAmelCase`` (so earlier defs were shadowed); the original
    signatures and method names are restored here.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        """
        Args:
            parameters: the parameters to track.
            decay: maximum EMA decay rate.
            min_decay: minimum EMA decay rate (floor during warmup).
            update_after_step: optimization steps to skip before EMA starts.
            use_ema_warmup: ramp the decay up as ``1 - (1 + step/inv_gamma) ** -power``.
            inv_gamma: inverse multiplicative factor of the EMA warmup.
            power: exponential factor of the EMA warmup.
            model_cls: model class, required by `save_pretrained`/`from_pretrained`.
            model_config: model config, required by `save_pretrained`.
        """
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage`',
                '1.0.0',
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get('max_value', None) is not None:
            deprecation_message = 'The `max_value` argument is deprecated. Please use `decay` instead.'
            deprecate('max_value', '1.0.0', deprecation_message, standard_warn=False)
            decay = kwargs['max_value']

        if kwargs.get('min_value', None) is not None:
            deprecation_message = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
            deprecate('min_value', '1.0.0', deprecation_message, standard_warn=False)
            min_decay = kwargs['min_value']

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get('device', None) is not None:
            deprecation_message = 'The `device` argument is deprecated. Please use `to` instead.'
            deprecate('device', '1.0.0', deprecation_message, standard_warn=False)
            self.to(device=kwargs['device'])

        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls):
        """Load a saved model with `model_cls` and build an EMA wrapper from it."""
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        """Save the EMA weights (and EMA hyperparameters) as a pretrained model."""
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        # Store the EMA hyperparameters inside the model config for round-tripping.
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Update the shadow parameters towards `parameters` by one EMA step."""
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`',
                '1.0.0',
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)
        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                # Under ZeRO-3 the parameter must be gathered before it can be read.
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    # Frozen parameters are copied verbatim.
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the current EMA (shadow) values into `parameters` in place."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the internal shadow buffers to `device` / cast floating buffers to `dtype`."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return a serializable dict of the EMA state (hyperparameters + shadow params)."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily save `parameters` (on CPU) so they can be `restore()`d later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters saved by the last `store()` call, then drop the copy."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load and validate an EMA state produced by `state_dict()`."""
        # Deepcopy so callers can mutate their dict afterwards without side effects.
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")
        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")
        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")
        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")
        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")
        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")
        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")
        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 330 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# The generated code bound both of these objects to the same name
# (`lowerCamelCase_`), so the logger was immediately overwritten by the dict;
# restore the two distinct module-level names.
logger = logging.get_logger(__name__)

# Legacy map: checkpoint name -> hosted config URL.
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
    # See all Marian models at https://huggingface.co/models?filter=marian
}

# Backwards-compatible alias: the previous (generated) name ended up bound to the dict.
lowerCamelCase_ = MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP
class _UpperCAmelCase(snake_case_):
    """
    Configuration class for Marian NMT models (``model_type="marian"``).

    NOTE: the generated version named every ``__init__`` parameter
    ``__UpperCAmelCase`` (a SyntaxError) and every class attribute
    ``snake_case`` (so earlier attributes were overwritten); the original
    names are restored here with the same default values.
    """

    model_type = '''marian'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Decoder vocab defaults to the (shared) encoder vocab size.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class _UpperCAmelCase ( snake_case_ ):
    """ONNX export configuration for Marian seq2seq models (copied from the
    BART ONNX config).

    NOTE(review): a name-mangling pass collapsed every method name to
    ``lowerCAmelCase`` (so later defs shadow earlier ones) and most locals to
    ``_A``, while several statements still reference the original local names
    (e.g. ``decoder_inputs``, ``common_inputs``, ``seq_length``) — those will
    raise NameError at runtime.  The code is kept byte-identical here; only
    documentation was added.
    """

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def lowerCAmelCase ( self : List[str] ):
        """Return the ONNX input spec (input name -> dynamic-axes mapping) for the current task."""
        if self.task in ["default", "seq2seq-lm"]:
            _A = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                # With a KV cache the decoder consumes one new token per step.
                _A = {0: "batch"}
                _A = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                _A = {0: "batch", 1: "decoder_sequence"}
                _A = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(__UpperCAmelCase , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            _A = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                _A , _A = self.num_layers
                for i in range(__UpperCAmelCase ):
                    _A = {0: "batch", 2: "past_sequence + sequence"}
                    _A = {0: "batch", 2: "past_sequence + sequence"}
        else:
            _A = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def lowerCAmelCase ( self : Union[str, Any] ):
        """Return the ONNX output spec, adding present-key-value axes when a cache is used."""
        if self.task in ["default", "seq2seq-lm"]:
            _A = super().outputs
        else:
            _A = super(__UpperCAmelCase , self ).outputs
            if self.use_past:
                _A , _A = self.num_layers
                for i in range(__UpperCAmelCase ):
                    _A = {0: "batch", 2: "past_sequence + sequence"}
                    _A = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : PreTrainedTokenizer , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ):
        """Build dummy encoder+decoder inputs (and zeroed past_key_values) for the
        default / seq2seq-lm export."""
        _A = self._generate_dummy_inputs_for_encoder_and_decoder(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        # Generate decoder inputs
        _A = seq_length if not self.use_past else 1
        _A = self._generate_dummy_inputs_for_encoder_and_decoder(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        _A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        _A = dict(**__UpperCAmelCase , **__UpperCAmelCase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            _A , _A = common_inputs["input_ids"].shape
            _A = common_inputs["decoder_input_ids"].shape[1]
            _A , _A = self.num_attention_heads
            # Per-layer KV tensor shape: (batch, heads, seq, head_dim).
            _A = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _A = decoder_seq_length + 3
            _A = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            _A = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
            _A = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            _A , _A = self.num_layers
            _A = min(__UpperCAmelCase , __UpperCAmelCase )
            _A = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
            _A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(__UpperCAmelCase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(__UpperCAmelCase ),
                        torch.zeros(__UpperCAmelCase ),
                        torch.zeros(__UpperCAmelCase ),
                        torch.zeros(__UpperCAmelCase ),
                    ) )
            # TODO: test this.
            _A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
                common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
        return common_inputs
    def lowerCAmelCase ( self : Dict , __UpperCAmelCase : PreTrainedTokenizer , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ):
        """Build dummy inputs (and zeroed past_key_values) for the causal-lm export."""
        _A = self._generate_dummy_inputs_for_encoder_and_decoder(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            _A , _A = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            _A = seqlen + 2
            _A , _A = self.num_layers
            _A , _A = self.num_attention_heads
            _A = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _A = common_inputs["attention_mask"].dtype
            _A = torch.cat(
                [common_inputs["attention_mask"], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
            _A = [
                (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
            ]
        return common_inputs
    def lowerCAmelCase ( self : str , __UpperCAmelCase : PreTrainedTokenizer , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ):
        """Tokenize a batch of dummy sentences; -1 batch/sequence sizes are replaced by
        fixed defaults so ONNX cannot specialise on them."""
        _A = compute_effective_axis_dimension(
            __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _A = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
        _A = compute_effective_axis_dimension(
            __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
        # Generate dummy inputs according to compute batch and sequence
        _A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        _A = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
        return common_inputs
    def lowerCAmelCase ( self : Dict , __UpperCAmelCase : PreTrainedTokenizer , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ):
        """Dispatch dummy-input generation according to the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            _A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
        else:
            _A = self._generate_dummy_inputs_for_causal_lm(
                __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
        return common_inputs
    def lowerCAmelCase ( self : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ):
        """Flatten nested past_key_values, picking the base-class strategy by task."""
        if self.task in ["default", "seq2seq-lm"]:
            _A = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        else:
            _A = super(__UpperCAmelCase , self )._flatten_past_key_values_(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
    @property
    def lowerCAmelCase ( self : int ):
        """Absolute tolerance used when validating exported ONNX outputs."""
        return 1E-4
| 330 | 1 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCamelCase_ = False
# The generated code bound both of these objects to the same name
# (`UpperCamelCase_`), so the logger was immediately overwritten; restore
# the two distinct module-level names.
logger = logging.get_logger(__name__)

# Hub repo that hosts the fallback font used when rendering header text.
DEFAULT_FONT_PATH = 'ybelkada/fonts'

# Backwards-compatible alias: the previous (generated) name ended up bound to the font path.
UpperCamelCase_ = DEFAULT_FONT_PATH
def _check_torch_version() -> None:
    """Raise ImportError unless the installed torch is >= 1.11 (required by the
    unfold-based patch extraction below).

    The functions below already call this name; the generated def had been
    renamed away from it.
    """
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
            '''Pix2StructImageProcessor. Please upgrade torch.''' )


# Backwards-compatible alias for the file's previous (generated) name.
SCREAMING_SNAKE_CASE = _check_torch_version
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches from a (channels, height, width) tensor.

    :param image_tensor: 3D image tensor
    :param patch_height: height of each patch
    :param patch_width: width of each patch
    :return: tensor of shape (1, rows, columns, patch_height * patch_width * channels)
    """
    requires_backends(torch_extract_patches, ['''torch'''])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    # unfold flattens each (patch_height, patch_width) window into one column.
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


# Backwards-compatible alias for the file's previous (generated) name.
SCREAMING_SNAKE_CASE = torch_extract_patches
def render_text(
    text,
    text_size = 36,
    text_color = "black",
    background_color = "white",
    left_padding = 5,
    right_padding = 5,
    top_padding = 5,
    bottom_padding = 5,
    font_bytes = None,
    font_path = None,
) -> Image.Image:
    """Render ``text`` onto a new PIL image, wrapped at 80 characters.

    :param text: text to render
    :param text_size: font size in points
    :param text_color: fill color of the text
    :param background_color: background color of the canvas
    :param left_padding/right_padding/top_padding/bottom_padding: pixel padding around the text
    :param font_bytes: raw font bytes to use (takes effect when ``font_path`` is None)
    :param font_path: path to a font file; when both are None the default hub font is downloaded
    """
    requires_backends(render_text, '''vision''')
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = '''\n'''.join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        # UpperCamelCase_ holds the hub repo of the default font ('ybelkada/fonts').
        font = hf_hub_download(UpperCamelCase_, '''Arial.TTF''')
    font = ImageFont.truetype(font, encoding='''UTF-8''', size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('''RGB''', (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('''RGB''', (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


# Backwards-compatible alias for the file's previous (generated) name.
SCREAMING_SNAKE_CASE = render_text
def render_header(image, header, **kwargs):
    """Render ``header`` as a text banner and paste it above ``image``.

    Both parts are resized to a common width; extra ``kwargs`` are forwarded
    to ``render_text``.

    :param image: image (numpy array or PIL image) to decorate
    :param header: header text to render above the image
    :return: the combined image as a numpy array
    """
    requires_backends(render_header, '''vision''')

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new('''RGB''', (new_width, new_height + new_header_height), '''white''')
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image


# Backwards-compatible alias for the file's previous (generated) name.
SCREAMING_SNAKE_CASE = render_header
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
    r"""
    Pix2Struct image processor: resizes each image so that a maximum number of
    fixed-size patches fits, flattens the patches and prefixes each with its
    (row, column) coordinates.

    NOTE: the generated version named every ``__init__`` parameter
    ``UpperCAmelCase`` (a SyntaxError) and every method ``A__`` even though
    the bodies call ``self.extract_flattened_patches`` / ``self.normalize``;
    the original names are restored here.
    """

    model_input_names = ['''flattened_patches''']

    def __init__(
        self,
        do_convert_rgb = True,
        do_normalize = True,
        patch_size = None,
        max_patches = 2048,
        is_vqa = False,
        **kwargs,
    ):
        """
        Args:
            do_convert_rgb: whether to convert inputs to RGB.
            do_normalize: whether to per-image standardize pixel values.
            patch_size: dict with "height"/"width" of a patch (default 16x16).
            max_patches: maximum number of patches extracted per image.
            is_vqa: whether the processor renders a header text (VQA mode).
        """
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs):
        """Resize ``image`` to fit at most ``max_patches`` patches, then return a
        (max_patches, 2 + patch_h * patch_w * channels) numpy array where the
        first two columns are the 1-based row/column ids of each patch."""
        requires_backends(self.extract_flattened_patches, '''torch''')
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size['''height'''], patch_size['''width''']
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the scaled image fits within max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode='''bilinear''',
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize(self, image, data_format=None, **kwargs):
        """Standardize ``image`` with its own mean and (floored) std, mimicking
        tf.image.per_image_standardization."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        # Floor the std so constant images do not divide by ~0.
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images,
        header_text = None,
        do_convert_rgb = None,
        do_normalize = None,
        max_patches = None,
        patch_size = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Convert one or more images into flattened patches plus attention masks.

        :param images: single image or list of images
        :param header_text: text rendered above each image (required when VQA mode is on)
        :return: BatchFeature with "flattened_patches" and "attention_mask"
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get('''data_format''', None) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are ''')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError('''A header text must be provided for VQA models.''')
            font_bytes = kwargs.pop('''font_bytes''', None)
            font_path = kwargs.pop('''font_path''', None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy; a patch row of all zeros is padding
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={'''flattened_patches''': images, '''attention_mask''': attention_masks}, tensor_type=return_tensors)

        return encoded_outputs
| 142 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    """Builds tiny RegNet configs and inputs for the TF model tests below.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    with the same name (a SyntaxError) and collapsed all attribute targets;
    descriptive names are restored from how the attributes are used in the tests.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # one stage per entry in hidden_sizes; used by test_hidden_states_output
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny RegNet."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the small RegNetConfig described by this tester's attributes."""
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the base model and verify the final feature-map shape."""
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run the classification head and verify the logits shape."""
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Model test suite for TF RegNet. Here we also overwrite some of the tests of
    test_modeling_tf_common.py, as RegNet does not use input_ids, inputs_embeds,
    attention_mask and seq_length.

    NOTE(review): base classes restored to the mixins imported at the top of the
    file; the obfuscated original used undefined names and duplicate parameter
    names (SyntaxError) throughout.
    """

    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        # RegNet is vision-only, so the config has no text modality
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        """The first positional argument of `model.call` must be `pixel_values`."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        """Tuple (`return_dict=False`) and dict outputs must contain equal tensors."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def SCREAMING_SNAKE_CASE() -> Tuple:
    """Load the local COCO fixture image used by the integration test below.

    NOTE(review): the original returned the undefined name ``image``; the
    ``Tuple`` return annotation looks wrong (a PIL image is returned) but is
    kept for interface compatibility.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run a pretrained TF RegNet on the COCO fixture image.

    NOTE(review): local names restored — the obfuscated original referenced
    undefined names (`model`, `image_processor`, `inputs`, `outputs`, ...).
    """

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        # SCREAMING_SNAKE_CASE is the module-level fixture loader defined above
        image = SCREAMING_SNAKE_CASE()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 142 | 1 |
"""simple docstring"""
class snake_case_:
    """Prefix-sum index over a list of numbers.

    Supports O(1) inclusive range-sum queries and a linear-time check for
    whether any contiguous subarray sums to a target value.

    NOTE(review): method and local names reconstructed — the obfuscated
    original used duplicate parameter names (SyntaxError) and referenced
    undefined locals (`len_array`, `array`, ...).
    """

    def __init__(self, array):
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        # prefix_sum[i] == array[0] + ... + array[i]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        """Return the sum of array[start..end] inclusive (0 <= start <= end < len)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        """Return True if some contiguous subarray sums to target_sum.

        Uses the identity prefix[j] - prefix[i] == target for some i < j.
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 260 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Wrapper around a DeepSpeed config: accepts a dict, a JSON file path, or a
    base64-encoded JSON string, and answers dotted-path queries against it.

    NOTE(review): names reconstructed — the obfuscated original collapsed all
    locals/attributes into one name and imported a nonexistent ``baseaa``
    module (base64).
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                import base64  # local import: the module header lacks a (working) base64 import

                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        """Cache the ZeRO stage and whether CPU/NVMe offload is enabled."""
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        """Resolve a dotted key to (parent_node, leaf_key); (None, leaf_key) if missing."""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        """Return the value at the dotted key, or `default` if any segment is missing."""
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        """Delete the sub-tree at the dotted key; raise if `must_exist` and it is absent."""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        """True iff the value exists and is truthy (missing key -> False)."""
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        """True iff the value exists and is falsy (missing key -> False)."""
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    """Internal wrapper that routes ``backward`` through the DeepSpeed engine,
    which owns gradient handling and the optimizer/scheduler step."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision inside the engine
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Internal wrapper around a DeepSpeed optimizer: stepping/zeroing is done by
    the engine, so those methods are no-ops here.

    NOTE(review): base class restored to ``AcceleratedOptimizer`` (imported at
    the top of this file); the original referenced the undefined ``_lowerCamelCase``.
    """

    def __init__(self, optimizer):
        # DeepSpeed manages device placement and gradient scaling itself
        super().__init__(optimizer, device_placement=False, scaler=None)
        # only some DeepSpeed optimizers expose `overflow`; remember whether it exists
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def is_overflow(self):
        """Whether the last optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Internal wrapper around a DeepSpeed scheduler: the engine performs the
    scheduler step, so ``step`` is a no-op here.

    NOTE(review): base class restored to ``AcceleratedScheduler`` (imported at
    the top of this file); the original referenced the undefined ``_lowerCamelCase``.
    """

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    """Placeholder optimizer: records hyper-parameters so the real optimizer
    specified in the DeepSpeed config file can be created later.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (SyntaxError); names restored from the stored attributes.
    """

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        # extra hyper-parameters are kept verbatim for the real optimizer
        self.kwargs = kwargs
class DummyScheduler:
    """Placeholder scheduler: records hyper-parameters so the real scheduler
    specified in the DeepSpeed config file can be created later.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (SyntaxError); names restored from the stored attributes.
    """

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        # extra hyper-parameters are kept verbatim for the real scheduler
        self.kwargs = kwargs
| 260 | 1 |
# Lazy-module bootstrap for the pix2struct model package: populates
# `_import_structure`, then replaces this module with a `_LazyModule`.
# NOTE(review): reconstructed — the obfuscated original assigned the structure
# dict to a throwaway name (so `_import_structure` at the bottom was undefined)
# and garbled the TYPE_CHECKING import names to `pixastruct`/`PixaStruct`,
# inconsistent with the string table above.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : str = logging.get_logger(__name__)
def get_detr_config(model_name):
    """Build the HF DetrConfig (and whether it is a panoptic model) for a checkpoint name.

    NOTE(review): locals restored — the obfuscated original passed the model
    name as `use_timm_backbone`/`backbone_config` and dropped the id2label maps.
    """
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    """Return (old_name, new_name) pairs mapping original DETR state-dict keys to HF names.

    Covers the ResNet stem, all backbone stages, encoder/decoder layers, and the
    projection/embedding/prediction heads.
    NOTE(review): the obfuscated original never bound `rename_keys` (NameError).
    """
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut
            if layer_idx == 0:
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
                    )
                )
            # 3 convs
            for i in range(3):
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
                    )
                )
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
                f"encoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
                f"decoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
                f"decoder.layers.{i}.encoder_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
                f"decoder.layers.{i}.encoder_attn.out_proj.bias",
            )
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )

    return rename_keys
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    NOTE(review): the obfuscated original discarded the popped value instead of
    re-inserting it under the new key.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each fused attention in_proj matrix/bias into separate q/k/v entries, in place.

    NOTE(review): reconstructed — the obfuscated original popped the fused
    tensors but never wrote the q/k/v slices back into ``state_dict``.
    The hidden size is 256, so the fused projection is 3*256 rows (q, k, v).
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download the standard COCO cats image used to verify the conversion.

    NOTE(review): the original passed the URL as `stream=` and returned an
    undefined name; restored.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak a torch-hub DETR checkpoint into the HuggingFace format,
    verify the outputs against the original model, and optionally save/upload.

    NOTE(review): locals reconstructed — the obfuscated original collapsed all
    names, so no renamed key was ever written back and the verification tensors
    were undefined.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    # NOTE(review): the parsed namespace was assigned to a throwaway name in the
    # obfuscated original while `args` was referenced; restored.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 290 | 0 |
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise TypeError("""'float' object cannot be interpreted as an integer""" )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise TypeError("""'str' object cannot be interpreted as an integer""" )
if num == 0:
return "0b0"
lowerCAmelCase__ = False
if num < 0:
lowerCAmelCase__ = True
lowerCAmelCase__ = -num
lowerCAmelCase__ = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(lowerCamelCase__ ) for e in binary )
return "0b" + "".join(str(lowerCamelCase__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644 | """simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : Dict = TypeVar("KT")
__lowerCAmelCase : Optional[Any] = TypeVar("VT")
class Node(Generic[KT, VT]):
    """A single node of a skip list.

    ``forward[i]`` is the successor of this node at level ``i``; a node
    participates in ``len(forward)`` levels.
    """

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # One successor reference per level this node takes part in.
        self.forward = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of levels this node participates in (the skip list reads
        ``node.level`` everywhere; the original mangled this property's name)."""
        return len(self.forward)


# Backward-compatible alias for the previous (mangled) class name.
a_ = Node
class SkipList(Generic[KT, VT]):
    """Probabilistic ordered key/value map with O(log n) expected search,
    insert and delete (Pugh's skip list).

    The original had every method mangled to one shared name, so only the
    last definition survived; method names are restored here to match the
    internal calls (``self.random_level``, ``self._locate_node``) and the
    external callers (``insert``/``delete``/``find``).
    """

    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head = Node[KT, VT]()  # sentinel; its "root" key is never compared
        self.level = 0              # highest level currently in use
        self.p = p                  # probability of promoting a node one level
        self.max_level = max_level

    def __str__(self) -> str:
        """ASCII rendering of the lanes, one column per level."""
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        """Yield keys in ascending order by walking level 0."""
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level for a new node: 1 + Geometric(p), capped at max_level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return ``(node_with_key or None, update_vector)``.

        ``update_vector`` holds, for each level bottom-up, the rightmost node
        whose key is strictly smaller than ``key`` (the nodes whose forward
        pointers an insert/delete at ``key`` must touch).
        """
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        """Remove ``key`` if present; silently do nothing otherwise."""
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """Insert ``key``/``value``; an existing key has its value replaced."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT):
        """Return the value stored under ``key``, or None if absent."""
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None


# Backward-compatible alias: the original file left the mangled name bound to
# this (second) class definition.
a_ = SkipList
def test_insert():
    """Inserted key/value pairs are reachable by walking level 0."""
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    """Re-inserting an existing key replaces its value instead of duplicating."""
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        # Visit every node reachable through any level, not just level 0,
        # so dangling references to the deleted node would be detected.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    # Repeat test 100 times due to the probabilistic nature of skip list
    # random values == random bugs
    for _ in range(100):
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    """Small demo: build a list, delete a key, print the layout."""
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


# Backward-compatible alias: the mangled name previously ended up bound to the
# last definition in this group.
_UpperCAmelCase = main


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 644 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Base import structure; backend-specific entries are appended below only when
# the corresponding framework is installed. The original reassigned a single
# name for every step, clobbering the dict and leaving `_import_structure`
# undefined at the `_LazyModule` call.
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverse every word of *sentence* that is longer than 4 characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'

    The original referenced an undefined ``sentence`` and tested the length of
    the whole argument instead of each word; the ``__main__`` block below
    already called it by this name.
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


# Backward-compatible alias for the previous (mangled) function name.
UpperCAmelCase = reverse_long_words


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
def solution(n: int = 400_0000) -> int:
    """Return the sum of the even Fibonacci numbers not exceeding *n*
    (Project Euler problem 2).

    The ``__main__`` block below already called ``solution()``, but the
    definition had been mangled to another name; restored here with a
    backward-compatible alias.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


# Backward-compatible alias for the previous (mangled) function name.
SCREAMING_SNAKE_CASE__ = solution


if __name__ == "__main__":
    print(f"""{solution() = }""")
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __a(__snake_case):
    """`AbstractDatasetReader` that materializes a 🤗 dataset from a PySpark
    DataFrame via the `Spark` packaged builder.

    NOTE(review): the original discarded every attribute assignment
    (``lowerCAmelCase_ = ...``) while ``read`` dereferenced
    ``self._load_from_cache_file`` / ``self._file_format`` / ``self.builder``;
    parameter names are reconstructed from the keyword arguments forwarded to
    ``super().__init__`` and to the ``Spark`` builder.
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Return the dataset for the configured split, streaming it when
        ``streaming=True`` and otherwise preparing it on disk first."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a re-download only when the cache was explicitly disabled.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
# NOTE(review): the original assigned every constant to one shared name,
# leaving CLS/SEP/... undefined where the dict below uses them; names restored
# to match the references inside the tokenizer class.
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class __A(UpperCAmelCase):
    r"""
    CANINE tokenizer: splits text into individual characters and uses each
    character's Unicode code point as its id, plus a handful of special
    pseudo-characters in the Private Use Area.

    The original definition had every ``__init__`` parameter mangled to one
    name (a SyntaxError), defaults of ``chr(<own parameter>)``, and a duplicate
    keyword argument in ``get_special_tokens_mask``; all restored here from the
    keyword names forwarded to ``super().__init__``.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize by splitting into individual characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """A character's Unicode code point is its id."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Convert a code point back to a character, preferring the
        human-readable name for special codepoints."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
| 718 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase = logging.get_logger(__name__)
class __A(UpperCAmelCase):
    r"""
    ConvNeXT-style image processor: optional resize (with a crop-pct based
    shrink-then-center-crop below 384px), rescale and normalize.

    The original had every method's parameters mangled to one shared name
    (a SyntaxError); names are restored from how each value is used in the
    method bodies and from the ``self.resize``/``self.rescale``/
    ``self.normalize`` call sites in ``preprocess``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image; below 384px, shrink by ``crop_pct`` then center-crop."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transforms to one image or a batch of images
        and return a ``BatchFeature`` with ``pixel_values``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 103 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure; backend-specific entries are appended only when the
# corresponding dependency is installed. The original reassigned one shared
# name per step, so the dict was clobbered and `_import_structure` was
# undefined at the `_LazyModule` call.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure; dependency-gated entries are appended below. The
# original reassigned one shared name per step, clobbering the dict and
# leaving `_import_structure` undefined at the `_LazyModule` call.
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase__ = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for
    training and eval.

    The original declared every field under one shared name (so later fields
    silently replaced earlier ones) and used an undefined default; field names
    are restored from the ``args.task_name`` / ``args.data_dir`` /
    ``args.max_seq_length`` / ``args.overwrite_cache`` reads in the dataset
    class below.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        # Normalize the task name so lookups into glue_processors are case-insensitive.
        self.task_name = self.task_name.lower()
class Split(Enum):
    """Dataset split names; ``Split.train`` etc. are read by the dataset class below."""

    train = "train"
    dev = "dev"
    test = "test"


# Backward-compatible alias for the obfuscated name this class was bound to.
__SCREAMING_SNAKE_CASE = Split
class __SCREAMING_SNAKE_CASE ( Dataset ):
    """PyTorch dataset of GLUE features, cached on disk per (split, tokenizer, max length, task)."""

    # String annotations: the classes are defined above under obfuscated names,
    # so keep them unevaluated to avoid NameErrors at class-creation time.
    args: "GlueDataTrainingArguments"
    output_mode: str
    features: "List[InputFeatures]"

    def __init__(
        self,
        args,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: "Union[str, Split]" = Split.train,
        cache_dir: Optional[str] = None,
    ):
        """Load features from a cached file or build (and cache) them from raw data.

        Args:
            args: data-training arguments (task_name, data_dir, max_seq_length, overwrite_cache).
            tokenizer: tokenizer used to convert examples to features.
            limit_length: optionally keep only the first N examples.
            mode: which split to load ("train"/"dev"/"test" or a Split member).
            cache_dir: where to read/write the feature cache (defaults to args.data_dir).
        """
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Cache file name encodes everything that affects the features.
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the
        # dataset, and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        # Fix: the original returned an undefined name instead of the index parameter.
        return self.features[i]

    def get_labels(self):
        return self.label_list
# ---- file boundary (dataset extraction artifact removed) ----
import inspect
import unittest
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Sanity checks for the diffusers dependency version table."""

    def test_diffusers_import(self):
        """diffusers must be importable at all."""
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        """Every backend referenced by a dummy object must appear in the deps table."""
        import diffusers
        from diffusers.dependency_versions_table import deps

        # Fix: the original passed an undefined name to getmembers instead of
        # the freshly imported `diffusers` module.
        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # pip package names differ from the import names used in _backends.
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
# ---- file boundary (dataset extraction artifact removed) ----
"""Convert a length between metric units (e.g. "kilometer" -> "meter")."""

# Unit name -> SI symbol.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter): SI symbol -> power of ten relative to one metre.
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}

# Legacy alias for the name the exponent table was previously bound to.
A = METRIC_CONVERSION


def a__(value, from_type, to_type):
    """Convert *value* from unit *from_type* to unit *to_type*.

    Unit names are case-insensitive and a trailing "s" is ignored
    ("meters" == "meter"); SI symbols ("km") are accepted directly.

    Raises:
        ValueError: if either unit is not a known metric length unit.

    >>> a__(1, "kilometer", "meter")
    1000
    >>> a__(1, "meter", "kilometer")
    0.001
    """
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    # Map full unit names to their symbols; symbols pass through unchanged.
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
# ---- file boundary (dataset extraction artifact removed) ----
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for the Mega submodule: maps submodule name -> public names.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling objects as well.
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---- file boundary (dataset extraction artifact removed) ----
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __A ( ModelMixin , ConfigMixin ):
    """Holds the mean/std used to (un)normalize CLIP image embeddings.

    The original methods were all obfuscated to the same name, so only the
    last survived; the canonical names (``to``/``scale``/``unscale``, as in
    diffusers' StableUnCLIPImageNormalizer) are restored here.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        # Learnable normalization statistics, one row of size embedding_dim each.
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        """Move/cast both parameters; returns self for chaining."""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Normalize embeddings: (x - mean) / std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert :meth:`scale`: x * std + mean."""
        embeds = (embeds * self.std) + self.mean
        return embeds
# ---- file boundary (dataset extraction artifact removed) ----
# Code injected as the first cell of every generated doc notebook.
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

# Cells prepended to generated notebooks.
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]

# Doc-string placeholders that the black formatter must not touch.
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
"""A platform-independent file lock (vendored from py-filelock 3.0.12).

The obfuscation destroyed the class hierarchy and every attribute
assignment; names are restored from the in-body reads and upstream filelock.
"""
import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Return (and lazily create) the module-level logger."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within *timeout* seconds."""

    def __init__(self, lock_file):
        #: Path of the file that could not be locked.
        self.lock_file = lock_file

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by :meth:`BaseFileLock.acquire`.

    Releases the wrapped lock on exit so ``with lock.acquire():`` works.
    """

    def __init__(self, lock):
        self.lock = lock

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()


class BaseFileLock:
    """Abstract base implementing the (re-entrant) locking protocol."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.  Not None only while the object holds the lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # Nested-locking counter: acquire() increments it; the OS lock is only
        # released when this value drops back to 0.
        self._lock_counter = 0

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquire the lock, polling every *poll_intervall* seconds.

        Raises:
            Timeout: if the lock could not be acquired within *timeout* seconds
                (a negative timeout blocks forever).
        """
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning; we can still undo it
        # if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Release one level of the lock; *force* releases unconditionally."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()

    def __del__(self):
        self.release(force=True)

    def hash_filename_if_too_long(self, path, max_length):
        """Shorten *path*'s basename with a hash when it exceeds *max_length*."""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses ``msvcrt.locking`` to hard-lock the lock file on Windows."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        # Extended-length path prefix avoids MAX_PATH issues on Windows.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass


class UnixFileLock(BaseFileLock):
    """Uses ``fcntl.flock`` to hard-lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        # The filesystem's own name limit supersedes the caller's value.
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)


class SoftFileLock(BaseFileLock):
    """Portable fallback: the mere existence of the lock file is the lock."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass


# Pick the best lock implementation available on this platform.
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
# ---- file boundary (dataset extraction artifact removed) ----
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
    """Slow integration test checking google/mt5-small's LM loss on a fixed pair."""

    @slow
    def test_small_integration_test(self):
        # Fix: the original passed the test class itself where `torch_device`
        # and `return_dict=True` were intended, and the method name was not
        # discoverable by unittest.
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # Score is the (negative) total log-likelihood over the label tokens.
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
# ---- file boundary (dataset extraction artifact removed) ----
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the Wav2Vec2 submodule: submodule -> public names.
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
    from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
    from .processing_wavaveca import WavaVecaProcessor
    from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavaveca import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavaVecaForAudioFrameClassification,
            WavaVecaForCTC,
            WavaVecaForMaskedLM,
            WavaVecaForPreTraining,
            WavaVecaForSequenceClassification,
            WavaVecaForXVector,
            WavaVecaModel,
            WavaVecaPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wavaveca import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWavaVecaForCTC,
            TFWavaVecaForSequenceClassification,
            TFWavaVecaModel,
            TFWavaVecaPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Fix: the Flax objects were imported from the TF modeling module.
        from .modeling_flax_wavaveca import (
            FlaxWavaVecaForCTC,
            FlaxWavaVecaForPreTraining,
            FlaxWavaVecaModel,
            FlaxWavaVecaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---- file boundary (dataset extraction artifact removed) ----
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece prefix marker for word-initial pieces.
SPIECE_UNDERLINE = "▁"

# The names below are read by the tokenizer class in this file
# (vocab_files_names, pretrained maps, etc.).
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class UpperCAmelCase_ ( PreTrainedTokenizer):
    """ErnieM tokenizer built on SentencePiece plus an explicit vocab file.

    Restoration note: the obfuscated original gave every parameter the same
    name (a SyntaxError) and every method the same name (so only the last
    survived); names below are recovered from the in-body reads and the
    standard ``PreTrainedTokenizer`` contract.
    """

    # Ernie-M has no token-type embedding, so only input_ids are model inputs.
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        """Return (char_start, char_end) spans in *text* for each token."""
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            # NOTE(review): SP_CHAR_MAPPING is referenced but not defined in this
            # excerpt — confirm the class-level mapping from upstream is present.
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; drop it and rebuild on load.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Apply the SP character mapping to every character of *text*."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize with SentencePiece, then split CJK chars, punctuation and
        digit/non-digit boundaries into separate pieces."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                # Keep a lone underline only when the next piece is not
                # word-initial already (and it is not the very first piece).
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] for one sequence; [CLS] A [SEP] [SEP] B [SEP] for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        """Offset mapping with (0, 0) spans for the special tokens."""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        """True for CJK unified ideographs."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """True for ASCII whitespace and Unicode space separators (Zs)."""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        """Read a one-token-per-line vocab file into a token -> index dict."""
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocab file and the serialized SentencePiece model; return the vocab path."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
# ---- file boundary (dataset extraction artifact removed) ----
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
# Module-level logger; the task classes below log through this name.
logger = logging.getLogger(__name__)
class lowercase__ ( SCREAMING_SNAKE_CASE ):
    """NER token-classification task reading CoNLL-style files.

    Restoration note: the obfuscated original named all three methods the same,
    so only one survived; the ``TokenClassificationTask`` method names are
    restored from the base-class contract.
    """

    def __init__(self, label_idx=-1):
        # Index of the column (negative = from the right) holding the label.
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Parse ``{data_dir}/{mode}.txt`` (one "word label" pair per line,
        blank lines / -DOCSTART- separating sentences) into InputExamples."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                # Flush the final sentence (file may not end with a blank line).
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Copy the test input to *writer*, replacing labels with predictions."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path):
        """Read labels from *path* (one per line), or return the CoNLL-2003 defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase__(SCREAMING_SNAKE_CASE):
    """Chunking task: identical file format to NER, but the chunk tag lives in
    the second-to-last column (label_idx=-2)."""

    def __init__(self):
        # Chunking datasets keep the chunk tag in column -2; the parent class
        # stores this as self.label_idx.
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Read one label per line from ``path``; otherwise return the
        CoNLL-2000 chunking tag set. Ensures "O" is always present."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class lowercase__(SCREAMING_SNAKE_CASE):
    """POS-tagging task over CoNLL-U files, parsed with ``conllu.parse_incr``.
    Uses the universal POS tag (``upos``) of each token as the label."""

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Parse ``{data_dir}/{mode}.txt`` (CoNLL-U) into InputExample objects."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(
                        InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels)
                    )
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """For each sentence, write one line of ``form (upos|prediction)`` pairs."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Read one label per line from ``path``; otherwise return the
        Universal POS tag set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 82 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class lowercase__(SCREAMING_SNAKE_CASE):
    """Image-classification task template: maps dataset columns onto the
    standard ("image", "labels") schema.

    NOTE(review): class attribute names are restored (the previous duplicate
    placeholder names shadowed one another) — the methods below read
    ``self.label_column`` / ``self.label_schema`` / ``self.image_column``,
    so these exact names are required.
    """

    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    # The ClassLabel *class* is intentionally used as a placeholder here; it is
    # replaced by the dataset's concrete ClassLabel in align_with_features.
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        actual ClassLabel feature. Raises ValueError if the label column is
        missing or not a ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ instead of setattr.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Mapping from the dataset's column names to the canonical ones."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 82 | 1 |
"""simple docstring"""
def __a ( _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
lowerCamelCase__ : List[Any] = len(_lowercase )
lowerCamelCase__ : Dict = [[0] * n for i in range(_lowercase )]
for i in range(_lowercase ):
lowerCamelCase__ : Union[str, Any] = y_points[i]
for i in range(2 , _lowercase ):
for j in range(_lowercase , _lowercase ):
lowerCamelCase__ : Optional[int] = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 121 | """simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list, height: float) -> int:
    """Classic minimax over a perfect binary tree of leaf ``scores``.

    ``depth`` is the current level, ``node_index`` the position within that
    level, ``is_max`` whether the current player maximizes, and ``height`` the
    depth at which the leaves live. Raises ValueError on negative depth or
    empty scores. (Names restored: the recursion below and main() call
    ``minimax`` explicitly.)
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    """Demo: run minimax on a fixed score vector and print the optimal value."""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    # Height of a perfect binary tree with len(scores) leaves.
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 121 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__magic_name__ : Optional[int] = XLNetTokenizer
__magic_name__ : List[str] = XLNetTokenizerFast
__magic_name__ : int = True
__magic_name__ : Any = True
def lowerCamelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase : Optional[int] = XLNetTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase : Optional[Any] = """<s>"""
__UpperCamelCase : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__UpperCamelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(lowerCAmelCase ) , 1006 )
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
__UpperCamelCase : Tuple = XLNetTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
__UpperCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [285, 46, 10, 170, 382] )
__UpperCamelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__UpperCamelCase : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
__UpperCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase : int = XLNetTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase )
__UpperCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase : Optional[int] = XLNetTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase )
__UpperCamelCase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase : Optional[int] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
__UpperCamelCase : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase )
__UpperCamelCase : str = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase )
__UpperCamelCase : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
__UpperCamelCase : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = {"""input_ids""": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 279 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    """Build the YolosConfig matching a named original YOLOS checkpoint.

    (Name restored: convert_yolos_checkpoint calls ``get_yolos_config``.
    Attribute mapping follows the upstream YOLOS conversion script — the
    constants below appear in the same order in the original.)
    """
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    # COCO object-detection label set (91 classes), fetched from the hub.
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config, base_model: bool = False):
    """Split each fused timm ``qkv`` projection into separate query/key/value
    entries, mutating ``state_dict`` in place.

    NOTE(review): the target key prefix ``vit.encoder.layer.{i}`` matches the
    naming used by convert_state_dict below — confirm against the upstream
    conversion script if this helper is actually exercised.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """Map a timm-style YOLOS parameter name onto the HF YolosForObjectDetection
    name by applying the replacement chain in order."""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name


def convert_state_dict(orig_state_dict: dict, model) -> dict:
    """Rewrite every key of ``orig_state_dict`` to HF naming. Fused ``qkv``
    matrices are split into query/key/value; everything else goes through
    rename_key. Mutates and returns ``orig_state_dict``."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # Keys look like "backbone.blocks.<layer>.attn.qkv.{weight,bias}",
            # so the layer index sits at position 2.
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats test image used for conversion sanity
    checks and return it as a PIL image. (The previous ``-> torch.Tensor``
    annotation was wrong: PIL.Image is returned.)"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """Convert an original YOLOS checkpoint to HF format, verify a forward pass
    against known logits/boxes, save the model + image processor, and
    optionally push both to the hub."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    # Reference slices of the first three queries for each known checkpoint.
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # (`parser`/`args` restored: the add_argument/parse calls below use them.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 279 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_(lowercase__):
    """Processor combining a ViT image processor with a CLIP tokenizer
    (CLIPSeg-style: accepts text prompts, visual prompts, and target images).

    NOTE(review): the class attributes and method names are restored to the
    ProcessorMixin contract (``attributes`` / ``image_processor_class`` /
    ``tokenizer_class``, ``batch_decode`` / ``decode``); the previous duplicate
    placeholder names shadowed one another.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated feature_extractor if no image_processor given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        """Prepare model inputs from text and/or images.

        Exactly one of text/visual_prompt may be given; images may accompany
        either. Returns a BatchEncoding (or dict of pixel values)."""
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__UpperCamelCase : Optional[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
__UpperCamelCase : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
__UpperCamelCase : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """MAUVE metric wrapper around the reference `mauve-text` implementation.

    NOTE(review): method names restored to the datasets.Metric contract
    (``_info`` / ``_compute``); both were previously given the same placeholder
    name, so the second shadowed the first.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Compute MAUVE between predictions and references; returns the
        mauve-text result object (fields: mauve, frontier_integral, ...)."""
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # Bug fix: the fallback must bind the name the class attribute below
    # actually references (`NllbTokenizer`), not a throwaway variable.
    NllbTokenizer = None
# Bug fix: these module constants were all bound to the same throwaway name,
# leaving `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` (referenced by the tokenizer class
# below) undefined.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1_024,
    "facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
UpperCamelCase__ :Union[str, Any] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", 
"""mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class A(PreTrainedTokenizerFast):
    """
    Fast NLLB tokenizer backed by the HuggingFace `tokenizers` library (BPE model).

    NLLB encodes the translation direction with language-code special tokens:
    with the default behaviour the source/target code is prepended to the text;
    with `legacy_behaviour=True` it is appended after the EOS token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    # Filled in by `set_src_lang_special_tokens` / `set_tgt_lang_special_tokens`.
    prefix_tokens = []
    suffix_tokens = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space
        # before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else '''eng_Latn'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source-language code (e.g. ``"eng_Latn"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Wrap one (or two) token-id sequences in the NLLB prefix/suffix tokens."""
        if token_ids_a_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a_a + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """NLLB does not use token type ids; return zeros of the appropriate length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Build model inputs for translation pipelines, forcing the target language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts,
        src_lang="eng_Latn",
        tgt_texts=None,
        tgt_lang="fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        # Bug fix: restore the standard `prepare_seq2seq_batch` name; the base
        # class defines `prepare_seq2seq_batch`, not `prepare_seqaseq_batch`.
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Point the fast tokenizer's post-processor at the given source language."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        # The post-processor injects the language-code/EOS framing around "$A"
        # (single sequence) and "$A"/"$B" (pair).
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str,
            pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Point the fast tokenizer's post-processor at the given target language."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str,
            pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the slow sentencepiece vocabulary file into `save_directory`.

        Raises:
            ValueError: if the tokenizer was loaded without a sentencepiece model.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.'''
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 355 |
"""simple docstring"""
import os
def A_(filename=None) -> int:
    """Solve Project Euler problem 22: total of the names' alphabetical scores.

    Each name's score is the sum of its letters' alphabet positions (A=1)
    multiplied by the name's 1-based rank in the sorted list.

    Args:
        filename: path to a file whose first line is ``"NAME","NAME",...``;
            defaults to ``p022_names.txt`` next to this script.

    Returns:
        The total score over all names.
    """
    # Bug fix: the original referenced the undefined name `snake_case__` for the
    # path, the loop iterable and the letter; restore concrete names.
    if filename is None:
        filename = os.path.dirname(__file__) + '''/p022_names.txt'''
    with open(filename) as file:
        names = str(file.readlines()[0])
    names = names.replace('''"''', '''''').split(''',''')
    names.sort()

    total_score = 0
    for i, name in enumerate(names):
        # ord("A") - 1 == 64, so 'A' scores 1, 'B' scores 2, ...
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += (i + 1) * name_score
    return total_score


if __name__ == "__main__":
    # Bug fix: the entry point is `A_`, not the undefined `solution`.
    print(A_())
| 355 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Bug fix: the import structure was repeatedly rebound to `a_`, leaving the
# `_import_structure` consumed by `_LazyModule` below undefined. Build one dict
# and extend it per available optional backend.
_import_structure = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["""LayoutXLMTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["""LayoutXLMTokenizerFast"""]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 711 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase__ ( ProcessorMixin ):
    """
    Combines a CLIP image processor and a CLIP tokenizer into a single processor.

    `__call__` forwards `text` to the tokenizer and `images` to the image
    processor and merges both results into one `BatchEncoding`.
    """

    # Note: the base class was the undefined `snake_case`; the file imports
    # `ProcessorMixin`, which provides `attributes`-driven construction.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `feature_extractor` is only honoured as a legacy fallback.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns a `BatchEncoding` with the tokenizer fields and, when images are
        given, a `pixel_values` entry.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Preserve order while de-duplicating names provided by both components.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 286 | 0 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
# Bug fix: both constants were bound to the same throwaway name, leaving
# `WATERMARK_MESSAGE` and `WATERMARK_BITS` (used by the watermarker class
# below) undefined.
WATERMARK_MESSAGE = 0b1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class SCREAMING_SNAKE_CASE:
    """Embeds an invisible DWT-DCT watermark into batches of images."""

    def __init__(self) -> None:
        # Bug fix: the original bound these to locals, leaving `self.watermark`
        # and `self.encoder` undefined for later use.
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('''bits''', self.watermark)

    def A(self, images: torch.FloatTensor) -> torch.FloatTensor:
        """Watermark a (batch, channels, height, width) tensor of images in [-1, 1].

        Images narrower than 256 pixels are returned unchanged.
        """
        # Bug fix: the parameter was named `__snake_case` while the body used
        # `images`; restore the name the body expects.
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # Map [-1, 1] -> [0, 255] channel-last numpy for the encoder.
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, '''dwtDct''') for image in images]

        # Back to channel-first torch in [-1, 1], clamped against encoder overshoot.
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
| 127 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Builds ChineseCLIP image-processor configs and random image batches for tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'''height''': 224, '''width''': 224}
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.do_normalize = do_normalize
        # None-sentinels avoid mutable list defaults shared across instances.
        self.image_mean = image_mean if image_mean is not None else [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
        self.image_std = image_std if image_std is not None else [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return kwargs suitable for constructing a ChineseCLIP image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Return a batch of random images as PIL (default), numpy or torch.

        Bug fixes: the original bound the batch list and the sampled width/height
        to throwaway names (then used `image_inputs`/`width`/`height`), and used
        the non-existent dtype `np.uinta` instead of `np.uint8`.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = [
                np.random.randint(
                    255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                )
                for _ in range(self.batch_size)
            ]
        else:
            image_inputs = []
            for _ in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


# Consistency alias: the test classes below instantiate the tester under this
# descriptive name.
ChineseCLIPImageProcessingTester = SCREAMING_SNAKE_CASE
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `ChineseCLIPImageProcessor` with the default 3-channel setup.

    Bug fixes vs. the previous version: every test method was named `A` (each
    definition shadowed the previous one, so only a single test ran), and the
    mixin base was the undefined `A__` instead of the imported
    `ImageProcessingSavingTestMixin`.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''center_crop'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_convert_rgb'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 224, '''width''': 224})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `ChineseCLIPImageProcessor` with 4-channel (RGBA) inputs.

    `do_convert_rgb` means encoded outputs still have 3 channels. Renamed from
    the duplicate class name that shadowed the 3-channel test class above; test
    methods were all named `A` and shadowed each other.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''center_crop'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_convert_rgb'''))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
| 127 | 1 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__A : Union[str, Any] = True
except ImportError:
__A : str = False
try:
from torch.hub import _get_torch_home
__A : List[str] = _get_torch_home()
except ImportError:
__A : Union[str, Any] = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
__A : List[Any] = os.path.join(torch_cache_home, "transformers")
__A : Optional[int] = "https://cdn.huggingface.co"
__A : Optional[int] = "https://s3.amazonaws.com/models.huggingface.co/bert"
__A : List[Any] = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
__A : List[Any] = os.path.join(PATH, "config.yaml")
__A : str = os.path.join(PATH, "attributes.txt")
__A : Union[str, Any] = os.path.join(PATH, "objects.txt")
__A : List[str] = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
__A : Dict = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
__A : Tuple = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
__A : int = "pytorch_model.bin"
__A : str = "config.yaml"
def lowercase(objs=None, attrs=None):
    """Load Visual Genome object-class and attribute label lists.

    Each input file has one label per line, optionally followed by
    comma-separated aliases; only the first (canonical) name is kept,
    lower-cased and stripped.

    Args:
        objs: path to the objects file; defaults to the bundled ``OBJECTS``.
        attrs: path to the attributes file; defaults to the bundled ``ATTRIBUTES``.

    Returns:
        Tuple ``(vg_classes, vg_attrs)`` of lists of label strings.
    """

    def _read_labels(path):
        # First comma-separated field of each line is the canonical label.
        with open(path) as f:
            return [line.split(",")[0].lower().strip() for line in f.readlines()]

    # Defaults are resolved lazily (None-sentinel) so defining this function
    # does not evaluate OBJECTS/ATTRIBUTES at import time.
    vg_classes = _read_labels(OBJECTS if objs is None else objs)
    vg_attrs = _read_labels(ATTRIBUTES if attrs is None else attrs)
    return vg_classes, vg_attrs
def lowercase(checkpoint_path):
    """Load a Detectron-style pickled checkpoint as an ``OrderedDict`` of tensors.

    NOTE(review): this second module-level ``lowercase`` shadows the
    label-loading helper defined earlier in the file; the name is kept for
    backward compatibility with existing callers.

    NOTE: ``pickle`` can execute arbitrary code — only load trusted checkpoints.

    Args:
        checkpoint_path: path to a pickle file containing ``{"model": {name: array}}``.

    Returns:
        ``OrderedDict`` mapping parameter names to ``torch.Tensor``.
    """
    r = OrderedDict()
    with open(checkpoint_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    # Iterate over a snapshot of the keys because entries are popped below.
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # Bug fix: `torch.tensor` is a factory function; the tensor *type*
            # to check against is `torch.Tensor`.
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class __lowerCAmelCase :
    # NOTE(review): this is a nested-configuration container — a dict of dicts
    # wrapped so that keys become attributes, with dotted-key assignment support.
    # The code below is machine-mangled: every public method shares the name
    # `_UpperCAmelCase` (later defs silently override earlier ones), and many
    # locals are bound to `A__` while subsequent lines read different names
    # (`d`, `levels`, `pointer`, `file_name`, `data`, ...), which raises
    # NameError at runtime.  Several call sites also reference `Config`, which
    # is not defined under that name.  Left byte-identical; needs a coordinated
    # rename across the whole file to repair.
    '''simple docstring'''

    # Class-level default; presumably the shared pointer table — TODO confirm.
    __magic_name__ : Union[str, Any] = {}

    def __init__( self : Any , UpperCamelCase__ : dict , UpperCamelCase__ : str = "root" , UpperCamelCase__ : int=0 ):
        # Recursively wrap nested dicts; `setattr` exposes each key as an
        # attribute.  NOTE(review): `name`, `level`, `dictionary`, `d` are read
        # but never bound here (mangling damage).
        A__ : List[Any] =name
        A__ : List[Any] =level
        A__ : Dict ={}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            A__ : Tuple =copy.deepcopy(UpperCamelCase__ )
            A__ : Optional[Any] =copy.deepcopy(UpperCamelCase__ )
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                # Nested dict becomes a child config one level deeper.
                A__ : Dict =Config(UpperCamelCase__ , name=UpperCamelCase__ , level=level + 1 )
            A__ : Tuple =v
            setattr(self , UpperCamelCase__ , UpperCamelCase__ )
        A__ : Tuple =d

    def __repr__( self : Optional[int] ):
        # Repr is the list of top-level keys.
        return str(list((self._pointer.keys()) ) )

    def __setattr__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any ):
        # Supports dotted keys ("a.b.c") by walking/updating the pointer table.
        # NOTE(review): `val`, `key`, `levels`, `last_level`, `pointer` are read
        # but never bound (mangling damage).
        A__ : str =val
        A__ : str =val
        A__ : str =key.split("." )
        A__ : Optional[int] =len(UpperCamelCase__ ) - 1
        A__ : int =self._pointer
        if len(UpperCamelCase__ ) > 1:
            for i, l in enumerate(UpperCamelCase__ ):
                if hasattr(self , UpperCamelCase__ ) and isinstance(getattr(self , UpperCamelCase__ ) , UpperCamelCase__ ):
                    setattr(getattr(self , UpperCamelCase__ ) , ".".join(levels[i:] ) , UpperCamelCase__ )
                if l == last_level:
                    A__ : int =val
                else:
                    A__ : Optional[int] =pointer[l]

    def _UpperCAmelCase ( self : int ):
        # Looks like `to_dict`: returns the raw pointer table.
        return self._pointer

    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
        # Looks like `dump_yaml(data, file_name)` — NOTE(review): `file_name`
        # is read but the parameters are mangled.
        with open(F'''{file_name}''' , "w" ) as stream:
            dump(UpperCamelCase__ , UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
        # Looks like `dump_json(data, file_name)`.
        with open(F'''{file_name}''' , "w" ) as stream:
            json.dump(UpperCamelCase__ , UpperCamelCase__ )

    @staticmethod
    def _UpperCAmelCase ( UpperCamelCase__ : List[Any] ):
        # Looks like `load_yaml(config_path)`; `data` is read but never bound.
        with open(UpperCamelCase__ ) as stream:
            A__ : Optional[int] =load(UpperCamelCase__ , Loader=UpperCamelCase__ )
        return data

    def __str__( self : Union[str, Any] ):
        # Pretty-print the nested config with one indent unit per level.
        A__ : int =" "
        if self._name != "root":
            A__ : Dict =F'''{t * (self._level-1)}{self._name}:\n'''
        else:
            A__ : Union[str, Any] =""
        A__ : Any =self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                r += F'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += F'''{t * (self._level)}{k}: {v} ({type(UpperCamelCase__ ).__name__})\n'''
        # Restore the level mutated while printing nested configs.
        A__ : Union[str, Any] =level
        return r[:-1]

    @classmethod
    def _UpperCAmelCase ( cls : List[str] , UpperCamelCase__ : str , **UpperCamelCase__ : Optional[int] ):
        # Looks like `from_pretrained`: fetch the config dict then build a Config.
        A__ , A__ : Dict =cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
        return cls(UpperCamelCase__ )

    @classmethod
    def _UpperCAmelCase ( cls : List[Any] , UpperCamelCase__ : str , **UpperCamelCase__ : List[str] ):
        # Looks like `get_config_dict(pretrained_model_name_or_path, **kwargs)`:
        # resolve a local path / URL / hub id to a yaml config and load it.
        A__ : Union[str, Any] =kwargs.pop("cache_dir" , UpperCamelCase__ )
        A__ : int =kwargs.pop("force_download" , UpperCamelCase__ )
        A__ : Any =kwargs.pop("resume_download" , UpperCamelCase__ )
        A__ : Tuple =kwargs.pop("proxies" , UpperCamelCase__ )
        A__ : str =kwargs.pop("local_files_only" , UpperCamelCase__ )
        if os.path.isdir(UpperCamelCase__ ):
            A__ : List[Any] =os.path.join(UpperCamelCase__ , UpperCamelCase__ )
        elif os.path.isfile(UpperCamelCase__ ) or is_remote_url(UpperCamelCase__ ):
            A__ : Optional[int] =pretrained_model_name_or_path
        else:
            A__ : Optional[Any] =hf_bucket_url(UpperCamelCase__ , filename=UpperCamelCase__ , use_cdn=UpperCamelCase__ )
        try:
            # Load from URL or cache if already cached
            A__ : List[Any] =cached_path(
                UpperCamelCase__ , cache_dir=UpperCamelCase__ , force_download=UpperCamelCase__ , proxies=UpperCamelCase__ , resume_download=UpperCamelCase__ , local_files_only=UpperCamelCase__ , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            A__ : Optional[int] =Config.load_yaml(UpperCamelCase__ )
        except EnvironmentError:
            A__ : Tuple ="Can't load config for"
            raise EnvironmentError(UpperCamelCase__ )
        if resolved_config_file == config_file:
            print("loading configuration file from path" )
        else:
            print("loading configuration file cache" )
        return Config.load_yaml(UpperCamelCase__ ), kwargs
def lowercase(in_tensor):
    """Compare `in_tensor` against the reference batch stored in ./dump.pt.

    Prints the two shapes and a sample of values, asserts element-wise
    closeness (rtol=0.01, atol=0.1), and — as in the original — signals
    success by raising Exception("tensors are all good").

    Args:
        in_tensor: torch tensor to check against dump.pt's first batch entry.
    """
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    # BUG FIX: both arrays were previously bound to the same name, so the
    # reference was compared against itself.
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape, na[0, 0, :5])
    print(nb.shape, nb[0, 0, :5])
    assert np.allclose(na, nb, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(na, nb, rtol=0.01, atol=0.1).flatten() if x is False]) / len(na.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def lowercase(url_or_filename):
    """Return True if the string parses as an http(s) URL, else False."""
    # BUG FIX: the parse result was bound to a throwaway name while the
    # return statement read `parsed`.
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def lowercase(model_id, filename, use_cdn=True):
    """Build the download URL for `filename` of model `model_id`.

    Args:
        model_id: hub model identifier; ids without "/" use the legacy layout.
        filename: artifact file name to append.
        use_cdn: choose the CDN endpoint over the raw S3 bucket.

    Returns:
        Fully qualified URL string.
    """
    # BUG FIX: the original signature repeated the same parameter name (a
    # SyntaxError) and the f-strings contained a literal "(unknown)" where
    # {filename} belonged.
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def lowercase(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """Stream `url` into the open binary file object `temp_file`.

    Sends a Range header when resuming, shows a tqdm progress bar, and returns
    early (downloading nothing) when the server answers 416.

    Args:
        url: URL to download.
        temp_file: writable binary file object receiving the payload.
        proxies: optional proxies mapping forwarded to requests.
        resume_size: bytes already present locally (resume offset).
        user_agent: extra user-agent info; dict or str.
    """
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        # Ask the server to resume from the bytes we already have.
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowercase(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """Download `url` into the cache directory and return the cached path.

    The cache key is derived from the URL and the server ETag.  When offline
    (no ETag) an already-cached copy is returned if one exists.  Downloads are
    serialized with a file lock; partial downloads can be resumed.

    Returns:
        Path to the cached file, or None when unreachable and not cached.

    Raises:
        ValueError: local_files_only=True and the file is not in the cache.
    """
    # BUG FIX throughout: locals were bound to a throwaway name while later
    # lines read etag / filename / cache_path / matching_files / ...
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False.")
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                # Append mode so an interrupted download can continue in place.
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name,
            )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent,
            )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def lowercase(url, etag=None):
    """Derive a deterministic cache filename from a URL (and optional ETag).

    The name is sha256(url), optionally suffixed with "." + sha256(etag), and
    keeps a ".h5" extension so downstream loaders can detect HDF5 files.
    """
    # Local import: the module-level hashlib import was mangled ("shaaaa" is
    # not a real hashlib name); sha256 is the intended digest.
    from hashlib import sha256

    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def lowercase(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """Resolve `url_or_filename` to a local path, downloading if remote.

    Remote URLs are fetched through get_from_cache; existing local paths are
    returned as-is.  With extract_compressed_file=True, zip/tar archives are
    unpacked next to the cached file (under a "<name>-extracted" directory)
    and the extraction directory is returned instead.

    Raises:
        EnvironmentError: local path that does not exist, or unknown archive.
        ValueError: string that is neither a URL nor a local path.
    """
    # BUG FIX throughout: locals were bound to a throwaway name while later
    # lines read output_path / output_file / output_path_extracted / ...
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def lowercase(query, delim=","):
    """Load data from a local file or an HTTP endpoint.

    Local files and textual responses are passed through eval(); responses
    that fail eval are split into lines.

    SECURITY NOTE(review): eval() on file/network content executes arbitrary
    code — only use with trusted sources.  Flagged, not replaced, to preserve
    behavior.

    Returns:
        The evaluated Python object, a decoded JSON object, or a list of lines.
    """
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            # BUG FIX: was `requests.json()` — called on the module instead of
            # the response object.
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def lowercase(url):
    """Download an image over HTTP and return it as a numpy array.

    Args:
        url: image URL.

    Returns:
        numpy array of the decoded image (PIL layout).
    """
    # BUG FIX: the response was bound to a throwaway name while the next line
    # read `response`.
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def lowercase(url):
    """Fetch (if needed) a pickled detectron-style checkpoint and torch-ify it.

    The file is downloaded into the current working directory unless already
    present.  Every entry of the "model" dict is converted to a torch tensor;
    each "running_var" entry additionally gets a zero "num_batches_tracked"
    companion, as expected by torch BatchNorm state dicts.

    Returns:
        dict mapping parameter names to torch tensors.
    """
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def lowercase():
    """Print the absolute path of the demo notebook next to this module."""
    # BUG FIX: the original referenced an undefined name inside os.path.join;
    # the module directory constant PATH is the intended base.
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def lowercase(im, input_format="RGB"):
    """Read an image from a local path or URL into a numpy array.

    Local paths are read with OpenCV; anything else is fetched over HTTP.
    The image is converted from BGR to RGB, then channel-reversed again when
    input_format == "RGB" (i.e. returned in BGR order for the model).

    Args:
        im: local file path or image URL (must be a str).
        input_format: "RGB" or "BGR" — controls the final channel flip.
    """
    # BUG FIX: the original asserted isinstance(im, im) and bound the image
    # to a throwaway name while later lines read `img`.
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def lowercase(images, batch=1):
    """Yield successive `batch`-sized slices of `images`.

    Args:
        images: any sliceable sequence.
        batch: slice size (the last slice may be shorter).

    Returns:
        Generator over slices of `images`.
    """
    # BUG FIX: the body read `images` and `batch` but the parameters had been
    # mangled to other names.
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 595 | """simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
# Module-level handles and telemetry flags.
# BUG FIX: these were all bound to a throwaway name while later code reads
# logger, SESSION_ID, HF_HUB_OFFLINE, DISABLE_TELEMETRY, etc.
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
# Random per-process session id appended to the user-agent string.
SESSION_ID = uuida().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def lowercase(user_agent: Union[Dict, str, None] = None):
    """Format a user-agent string describing the runtime environment.

    Args:
        user_agent: extra user-agent info; a dict of key/value pairs or a str.

    Returns:
        The assembled user-agent string (ends in "; telemetry/off" when
        telemetry is disabled or the hub is offline).
    """
    # BUG FIX: the string was bound to a throwaway name while every following
    # line appended to `ua`; the isinstance checks were also mangled.
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def lowercase(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Return the fully-qualified hub repo name "namespace/model_id".

    Falls back to the locally stored token and the token owner's username
    when organization/token are not given.
    """
    # BUG FIX: the token and username were bound to throwaway names while the
    # following lines read `token` and `username`.
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def lowercase(args, model_name):
    """Render and save a README.md model card from the training `args`.

    Requires Jinja; on multi-process runs only local rank -1/0 writes the card.
    Optional hyperparameters are forwarded only when present on `args`.

    Args:
        args: training-argument namespace (hub_token, dataset_name, output_dir, ...).
        model_name: name of the trained model, used to build the repo name.

    Raises:
        ValueError: when Jinja is not installed.
    """
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`.")
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    # BUG FIX: the original call repeated the keyword `adam_betaa=` twice
    # (a SyntaxError); the hasattr() strings show the intended attribute
    # names are adam_beta1 / adam_beta2.
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def lowercase(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved hub-cache snapshot path.

    Returns `commit_hash` unchanged when given (or when there is no file);
    otherwise parses ".../snapshots/<hash>/..." out of the path and validates
    it against REGEX_COMMIT_HASH.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    # BUG FIX: the normalized path, the regex match and the extracted hash
    # were bound to throwaway names while later lines read them.
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
# BUG FIX: both values were bound to a throwaway name while later code reads
# hf_cache_home and old_diffusers_cache.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def lowercase(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    """Move blob files from the pre-v0.14 diffusers cache to the new layout.

    Each regular (non-symlink) file under "**/blobs/" is moved to the same
    relative location under `new_cache_dir`, and a symlink is left behind so
    older diffusers versions keep working.

    Args:
        old_cache_dir: source cache root (defaults to old_diffusers_cache).
        new_cache_dir: destination cache root (defaults to DIFFUSERS_CACHE).
    """
    # BUG FIX: the expanded paths and the destination blob path were bound to
    # throwaway names while later lines read them.
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.")
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time migration of the pre-v0.14 cache layout, executed at import time.
# BUG FIX: every value here was bound to a throwaway name while later lines
# read cache_version_file / cache_version / old_cache_is_not_empty / trace.
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )
if cache_version < 1:
    # Record that the migration ran so it is not attempted again.
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
            "the directory exists and can be written to."
        )
def lowercase(weights_name: str, variant: Optional[str] = None):
    """Insert `variant` just before the file extension of `weights_name`.

    e.g. ("model.bin", "fp16") -> "model.fp16.bin".  Returns the name
    unchanged when no variant is given.
    """
    # BUG FIX: the split pieces and the rebuilt name were bound to throwaway
    # names while the following lines read `splits` / `weights_name`.
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
def lowercase(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """Resolve `weights_name` to a local file: direct path, directory, or Hub.

    Order: an existing file path is returned as-is; a directory is searched
    (optionally under `subfolder`); otherwise the file is downloaded from the
    Hub, including a deprecation path for variants requested via `revision`.

    Raises:
        EnvironmentError: on any resolution/download failure, with a
            situation-specific message.
    """
    # BUG FIX: all parameters were mangled to one repeated name while the
    # f-strings below read the real identifiers (pretrained_model_name_or_path,
    # weights_name, revision, ...), so every call raised NameError.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.")
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`.")
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.")
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.")
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}")
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.")
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}")
| 595 | 1 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A : List[str] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =PegasusTokenizer
__UpperCAmelCase : str =PegasusTokenizerFast
__UpperCAmelCase : List[str] =True
__UpperCAmelCase : List[Any] =True
def snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = PegasusTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case ( self ):
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def snake_case ( self , **__a ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , __a ):
return ("This is a test", "This is a test")
def snake_case ( self ):
__lowerCAmelCase = "</s>"
__lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def snake_case ( self ):
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(__a ) , 11_03 )
def snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def snake_case ( self ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
__lowerCAmelCase = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
__lowerCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=__a , add_special_tokens=__a ).input_ids[0]
__lowerCAmelCase = py_tokenizer([raw_input_str] , return_tensors=__a , add_special_tokens=__a ).input_ids[0]
self.assertListEqual(__a , __a )
def snake_case ( self ):
__lowerCAmelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__lowerCAmelCase = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
__lowerCAmelCase = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
__lowerCAmelCase = tokenizer([raw_input_str] , return_tensors=__a ).input_ids[0]
self.assertListEqual(__a , __a )
def snake_case ( self ):
__lowerCAmelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
__lowerCAmelCase = "To ensure a smooth flow of bank resolutions."
__lowerCAmelCase = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
__lowerCAmelCase = tokenizer([raw_input_str] , return_tensors=__a ).input_ids[0]
self.assertListEqual(__a , __a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def snake_case ( self ):
__lowerCAmelCase = ["This is going to be way too long." * 1_50, "short example"]
__lowerCAmelCase = ["not super long but more than 5 tokens", "tiny"]
__lowerCAmelCase = self._large_tokenizer(__a , padding=__a , truncation=__a , return_tensors="pt" )
__lowerCAmelCase = self._large_tokenizer(
text_target=__a , max_length=5 , padding=__a , truncation=__a , return_tensors="pt" )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(__a ) == 2 # input_ids, attention_mask.
@slow
def snake_case ( self ):
    """Integration test: compare a canned encoding against a pinned Hub revision."""
    # fmt: off
    # NOTE(review): `__a` below should refer to this expected-encoding dict, but the
    # dict is bound to the placeholder `__lowerCAmelCase`, so `__a` is undefined.
    __lowerCAmelCase = {"input_ids": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
    # fmt: on
    self.tokenizer_integration_test_util(
        expected_encoding=__a , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ):
    '''Tokenization tests for the BigBird-Pegasus flavour of PegasusTokenizer.'''

    # Tokenizer classes and feature flags for the shared tokenizer test mixin.
    # NOTE(review): all four class attributes share one placeholder name, so each
    # assignment overwrites the previous one; only the last (int True) survives.
    __UpperCAmelCase : Tuple =PegasusTokenizer
    __UpperCAmelCase : str =PegasusTokenizerFast
    __UpperCAmelCase : str =True
    __UpperCAmelCase : int =True

    def snake_case ( self ):
        """Create and save a PegasusTokenizer fixture into the temp dir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): `__a` (the fixture vocab path) and `tokenizer` are unbound
        # here — the constructed tokenizer lands in the placeholder name — so this
        # setUp would raise NameError. TODO confirm the intended fixture path.
        __lowerCAmelCase = PegasusTokenizer(__a , offset=0 , mask_token_sent=__a , mask_token="[MASK]" )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def snake_case ( self ):
        """Large reference tokenizer loaded from the Hub."""
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )

    def snake_case ( self , **__a ):
        """Reload the fixture tokenizer saved by setUp."""
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self , __a ):
        """Fixed (input, expected) text pair for the common tokenizer tests."""
        return ("This is a test", "This is a test")

    def snake_case ( self ):
        """Compare python and rust tokenizer encodings of a special-token-laden string."""
        __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCAmelCase = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        # NOTE(review): `rust_tokenizer` / `py_tokenizer` / `raw_input_str` / `__a`
        # are unbound in this method (placeholder assignments above), so these
        # calls would raise NameError at runtime.
        __lowerCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=__a , add_special_tokens=__a ).input_ids[0]
        __lowerCAmelCase = py_tokenizer([raw_input_str] , return_tensors=__a , add_special_tokens=__a ).input_ids[0]
        self.assertListEqual(__a , __a )

    @require_torch
    def snake_case ( self ):
        """Exercise padded/truncated batches for source and target texts (4096 ctx)."""
        __lowerCAmelCase = ["This is going to be way too long." * 10_00, "short example"]
        __lowerCAmelCase = ["not super long but more than 5 tokens", "tiny"]
        # NOTE(review): `batch` / `targets` / `__a` are unbound here as well.
        __lowerCAmelCase = self._large_tokenizer(__a , padding=__a , truncation=__a , return_tensors="pt" )
        __lowerCAmelCase = self._large_tokenizer(
            text_target=__a , max_length=5 , padding=__a , truncation=__a , return_tensors="pt" )
        assert batch.input_ids.shape == (2, 40_96)
        assert batch.attention_mask.shape == (2, 40_96)
        assert targets["input_ids"].shape == (2, 5)
        assert len(__a ) == 2  # input_ids, attention_mask.

    def snake_case ( self ):
        """Pin the ids produced for a reference sentence (parity with the TF original)."""
        __lowerCAmelCase = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        __lowerCAmelCase = self._large_tokenizer(__a ).input_ids
        self.assertListEqual(
            __a , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
    """GPU integration tests for StableDiffusionKDiffusionPipeline.

    Rebuilt from a corrupted block: every result was bound to the single
    placeholder name ``__lowerCAmelCase`` while the following lines read names
    (``sd_pipe``, ``prompt``, ``output``, ...) that were never defined, the
    teardown hook was not named ``tearDown`` (so VRAM was never freed), and the
    test methods were not named ``test_*`` (so they were never collected).
    """

    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1( self ):
        """SD v1-4 with the `sample_euler` k-diffusion sampler: pin a 3x3 slice."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler("sample_euler" )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_stable_diffusion_2( self ):
        """SD v2-1-base with `sample_euler`: loose-tolerance sanity check."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler("sample_euler" )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1

    def test_stable_diffusion_karras_sigmas( self ):
        """SD v2-1-base with `sample_dpmpp_2m` and Karras sigmas enabled."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler("sample_dpmpp_2m" )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
# Map from checkpoint name on the Hub to its config URL.
# NOTE(review): this rebinds `lowerCAmelCase__`, clobbering the logger bound above.
lowerCAmelCase__ : Optional[int] = {
    """tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
        """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
    ),
}
class a ( SCREAMING_SNAKE_CASE ):
    """Configuration for a GPTSAN-japanese model.

    Defaults reproduce the ``tanreinama/GPTSAN-2.8B-spout_is_uniform`` checkpoint.
    Rebuilt from a corrupted block whose ``__init__`` repeated the placeholder
    parameter name ``snake_case_`` for all 25 arguments (a SyntaxError) and bound
    every attribute to a throwaway local; the real parameter names are recovered
    from the values the body reads.
    """

    model_type = """gptsan-japanese"""
    # Cache entries that generation utilities should not treat as model outputs.
    keys_to_ignore_at_inference = [
        """past_key_values""",
    ]
    # Canonical attribute names -> the names used by this config.
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        vocab_size=3_6_0_0_0,
        max_position_embeddings=1_2_8_0,
        d_model=1_0_2_4,
        d_ff=8_1_9_2,
        d_ext=4_0_9_6,
        d_spout=1_2_8,
        num_switch_layers=1_0,
        num_ext_layers=0,
        num_heads=1_6,
        num_experts=1_6,
        expert_capacity=1_2_8,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.0_0_2,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=3_5_9_9_8,
        pad_token_id=3_5_9_9_5,
        eos_token_id=3_5_9_9_9,
        **kwargs,
    ):
        """Store the hyper-parameters and forward the special token ids upstream."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth is the switch (MoE) stack plus the extra dense stack.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
'''simple docstring'''
def _a ( __lowerCAmelCase : int ) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1.

    Args:
        __lowerCAmelCase: the exponent ``p`` (must be >= 2).

    Returns:
        True iff ``2**p - 1`` is prime (always True for ``p == 2``).

    Raises:
        ValueError: if ``p`` is less than 2.
    """
    # Fix: the original body read `p`, but the parameter was named
    # `__lowerCAmelCase`, so every call raised NameError.
    p = __lowerCAmelCase
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1  # the Mersenne number 2**p - 1
    for _ in range(p - 2 ):
        # Lucas-Lehmer recurrence: s <- s^2 - 2 (mod M_p)
        s = ((s * s) - 2) % m
    return s == 0


# The demo below referred to `lucas_lehmer_test`, which was never defined;
# expose it as a backward-compatible alias of `_a`.
lucas_lehmer_test = _a

if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCAmelCase_ ( monkeypatch ):
    """Reset the "already emitted" deprecation-warning registry before a test.

    The parameter must be named ``monkeypatch`` so pytest injects its built-in
    fixture; the original placeholder name ``__A`` left ``monkeypatch`` unbound
    and would have raised NameError on first use.
    """
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )
@pytest.fixture
def lowerCAmelCase_ ( monkeypatch ):
    """Patch ``datasets.inspect.huggingface_hub`` with a stub Hub client.

    The stub's ``list_metrics`` returns four fake metric entries so the tests do
    not hit the network.  Internals rebuilt from a corrupted block in which the
    nested classes were bound to placeholder names, leaving ``monkeypatch`` and
    ``MetricMock`` undefined at the points they were used.
    """

    class MetricMock:
        """Minimal stand-in for one Hub metric entry."""

        def __init__( self , metric_id ):
            self.metric_id = metric_id

    class HfhMock:
        """Minimal stand-in for the ``huggingface_hub`` client used by datasets."""

        _metrics = [MetricMock(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics( self ):
            return self._metrics

    monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )
@pytest.mark.parametrize(
    'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def lowerCAmelCase_ ( func , args , tmp_path ):
    """Each deprecated metric entry point must emit a FutureWarning pointing to evaluate.

    Fixed from the corrupted original: its five parameters all shared the name
    ``__A`` (a SyntaxError), the warning call and the invocation read unbound
    names, and a stray "| 329 |" fragment was fused onto the final line.
    NOTE(review): the two mock fixtures above should also be requested here once
    they are given distinct names — both are currently called ``lowerCAmelCase_``.
    """
    if "tmp_path" in args:
        # Substitute the 'tmp_path' placeholder string with the real pytest tmp_path.
        args = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match='https://huggingface.co/docs/evaluate' ):
        func(*args )
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for ConsistencyModelPipeline.

    Rebuilt from a corrupted block: a signature repeated the placeholder
    parameter name ``SCREAMING_SNAKE_CASE__`` (a SyntaxError), the mixin base
    was replaced by an undefined name, and every local was bound to a throwaway
    placeholder while later lines read never-defined names.  Real names are
    recovered from the attributes and methods the bodies read
    (``self.dummy_cond_unet``, ``self.get_dummy_components``, ...).
    """

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ] )

    @property
    def dummy_uncond_unet( self ):
        """Tiny unconditional test UNet fetched from the Hub."""
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet

    @property
    def dummy_cond_unet( self ):
        """Tiny class-conditional test UNet fetched from the Hub."""
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet

    def get_dummy_components( self , class_cond=False ):
        """Assemble pipeline components; `class_cond` selects the conditional UNet."""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):
        """Deterministic call kwargs for the pipeline on `device`."""
        if str(device ).startswith('mps' ):
            # MPS has no per-device seedable Generator; fall back to the global RNG.
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs

    def test_consistency_model_pipeline_multistep( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def test_consistency_model_pipeline_multistep_class_cond( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def test_consistency_model_pipeline_onestep( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def test_consistency_model_pipeline_onestep_class_cond( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration tests for ConsistencyModelPipeline.

    Rebuilt from a corrupted block: helper signatures repeated the placeholder
    parameter name ``SCREAMING_SNAKE_CASE__`` (a SyntaxError), a stray
    "| 329 | 1 |" fragment was fused onto the final line, ``torch.floataa`` is
    not a real attribute (``torch.float16`` intended), and locals were bound to
    throwaway placeholders.  Real names are recovered from what the bodies read
    (``self.get_inputs``, ``self.get_fixed_latents``, ``generator``, ...).
    """

    def tearDown( self ):
        """Free CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs( self , seed=0 , get_fixed_latents=False , device="cpu" , dtype=torch.float16 , shape=(1, 3, 64, 64) ):
        """Build the common pipeline kwargs; optionally pin fixed latents."""
        generator = torch.manual_seed(seed )
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed , device=device , dtype=dtype , shape=shape )
            inputs['latents'] = latents
        return inputs

    def get_fixed_latents( self , seed=0 , device="cpu" , dtype=torch.float16 , shape=(1, 3, 64, 64) ):
        """Deterministic latents for a given seed/device/dtype/shape."""
        if type(device ) == str:
            device = torch.device(device )
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        return latents

    def test_consistency_model_cd_multistep( self ):
        """Multistep sampling on the ImageNet-64 CD checkpoint matches a pinned slice."""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def test_consistency_model_cd_onestep( self ):
        """One-step sampling on the ImageNet-64 CD checkpoint matches a pinned slice."""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn( self ):
        """Multistep sampling with torch 2.0 flash attention and fixed fp16 latents."""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn( self ):
        """One-step sampling with torch 2.0 flash attention and fixed fp16 latents."""
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
# NOTE(review): the class below calls `logger.warning(...)`, but this binds the
# logger to the placeholder name `__lowerCamelCase`, leaving `logger` undefined.
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class a__ ( SequenceFeatureExtractor ):
    """M-CTC-T speech feature extractor: MFSC (log-mel) features plus optional CMVN.

    Rebuilt from a corrupted block: ``__init__`` and ``__call__`` repeated the
    placeholder parameter name ``_A`` (a SyntaxError), the base class was an
    undefined name (``SequenceFeatureExtractor`` is imported at the top of this
    file), and locals were bound to throwaway placeholders.  Real names are
    recovered from what the bodies read and from the ``self.*`` attributes used
    across methods.
    """

    model_input_names = ['input_features', 'attention_mask']

    def __init__(
        self,
        feature_size=80,
        sampling_rate=1_6000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=3_2768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        """Store framing/filterbank parameters and derive FFT sizes (ms -> samples)."""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        # win_length/hop_length are given in milliseconds.
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features( self , one_waveform ):
        """Compute (num_frames, feature_size) MFSC features for one waveform."""
        if self.win_function == "hamming_window":
            # assumes the non-periodic Hamming variant matches the reference — TODO confirm
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="log" , )
        return msfc_features.T

    def _normalize_one( self , x , input_length , padding_value ):
        """Mean/variance-normalize the first ``input_length`` frames; re-pad the rest."""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x

    def normalize( self , input_features , attention_mask=None ):
        """Apply per-utterance CMVN, using the attention mask for true lengths."""
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        """Featurize one utterance or a batch, then pad, normalize and convert.

        Raises:
            ValueError: if `sampling_rate` disagrees with the extractor's, or if
                multi-channel audio is passed.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" )
        if isinstance(input_features[0] , list ):
            padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols, consumed by _LazyModule below.  (In the
# corrupted original this dict and the final _LazyModule instance were bound to
# throwaway placeholder names, leaving `_import_structure` undefined, and a
# stray "| 707 |" fragment was fused onto the first import line.)
_import_structure = {
    '''configuration_xlm_roberta_xl''': [
        '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XLMRobertaXLConfig''',
        '''XLMRobertaXLOnnxConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch absent: expose only the configuration objects.
    pass
else:
    _import_structure['''modeling_xlm_roberta_xl'''] = [
        '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMRobertaXLForCausalLM''',
        '''XLMRobertaXLForMaskedLM''',
        '''XLMRobertaXLForMultipleChoice''',
        '''XLMRobertaXLForQuestionAnswering''',
        '''XLMRobertaXLForSequenceClassification''',
        '''XLMRobertaXLForTokenClassification''',
        '''XLMRobertaXLModel''',
        '''XLMRobertaXLPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy loader so heavy deps import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 316 | 0 |
'''simple docstring'''
from math import factorial
def snake_case__(_A: int = 100) -> int:
    """Return the sum of the digits of ``_A!`` (Project Euler problem 20).

    The original summed ``int(_A)`` once per digit, which yields
    ``num_digits * _A`` instead of the digit sum — fixed to ``int(x)``.
    """
    return sum(int(x) for x in str(factorial(_A)))


if __name__ == "__main__":
    # The function in this module is named `snake_case__`, not `solution`.
    print(snake_case__(int(input("Enter the Number: ").strip())))
| 370 | '''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def snake_case__(_A: dict) -> bool:
    """Return True iff the adjacency-list graph *_A* (vertices 0..n-1) is
    bipartite, using DFS 2-coloring followed by an edge-color check.

    The original inner ``def dfs(_A, _A)`` had duplicate parameter names
    (a SyntaxError) and read the module-level ``graph`` instead of the
    parameter — both fixed.
    """
    visited = [False] * len(_A)
    color = [-1] * len(_A)

    def dfs(v, c):
        # Color vertex v with c, then recurse into uncolored neighbours
        # using the opposite color.
        visited[v] = True
        color[v] = c
        for u in _A[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every connected component.
    for i in range(len(_A)):
        if not visited[i]:
            dfs(i, 0)

    # A same-colored edge means the graph is not bipartite.
    for i in range(len(_A)):
        for j in _A[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(snake_case__(graph))
| 370 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
# Emit INFO-level progress messages while converting.
logging.set_verbosity_info()
a_ : Union[str, Any] = logging.get_logger(__name__)  # module-level logger
def __a(config, base_model=False):
    """Build (old key, new key) pairs mapping DINO checkpoint names to HF ViT names.

    Args:
        config: ViT configuration; only ``num_hidden_layers`` is read here.
        base_model: when True, produce keys for a bare ``ViTModel`` (the
            leading ``vit.`` prefix is stripped and the classifier head is
            replaced by the final layernorm/pooler entries).

    The original signature reused the same parameter name twice (a
    SyntaxError) and appended to an unbound ``rename_keys`` — both fixed.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def __a(state_dict, config, base_model=False):
    """Split each fused DINO qkv projection into separate HF query/key/value
    entries, mutating *state_dict* in place.

    The original signature reused one parameter name three times (a
    SyntaxError) and dropped every computed slice into a throwaway local
    instead of writing it back into the state dict — both fixed.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def __a(state_dict):
    """Drop the linear classification-head entries from *state_dict* in place.

    The original referenced the unbound names ``ignore_keys`` and
    ``state_dict`` (the list and parameter were both renamed away) — fixed.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # Use a default so already-absent keys are ignored.
        state_dict.pop(k, None)
def __a(dct, old, new):
    """Move the value stored under key *old* to key *new*, in place.

    The original signature reused one parameter name three times (a
    SyntaxError) and never stored the popped value back — both fixed.
    """
    val = dct.pop(old)
    dct[new] = val
def __a():
    """Download and return the standard COCO test image used to sanity-check
    the conversion.

    The original bound the image to a throwaway local and returned the
    unbound name ``im`` (NameError) — fixed.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __a(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak a DINO checkpoint from torch.hub into a HF ViT model,
    verify the outputs match, and save model + image processor.

    The original signature reused one parameter name three times (a
    SyntaxError) and assigned every config field / intermediate result to a
    throwaway local — restored.

    NOTE(review): this function calls ``remove_classification_head_``,
    ``create_rename_keys``, ``rename_key``, ``read_in_q_k_v`` and
    ``prepare_img`` exactly as the original did, but in this module those
    helpers are all defined under the name ``__a`` — confirm the intended
    helper names module-wide.
    """
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the DINO -> HF ViT conversion script.
    # NOTE(review): the parser instance below is bound to `a_`, yet later lines
    # reference `parser`, `args` and `convert_vit_checkpoint`, none of which
    # are defined under those names in this module — running this script will
    # raise NameError; the bindings need fixing.
    a_ : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='dino_vitb16',
        type=str,
        help='Name of the model trained with DINO you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--base_model',
        action='store_true',
        help='Whether to only convert the base model (no projection head weights).',
    )
    parser.set_defaults(base_model=True)
    a_ : Optional[int] = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 148 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite for LayoutLM, driven by the shared TokenizerTesterMixin.

    Restored from the mangled original: the four mixin configuration
    attributes were all bound to one name (`_lowercase`, overwriting each
    other), all five methods were named `_UpperCAmelCase` (only the last
    survived), the base class name was unbound, and `setUp` never assigned
    ``self.vocab_file`` before using it.
    """

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocab sufficient for the tokenization assertions below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix
        token make sure it is prepended to decoder_input_ids"""
        pass
| 148 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class _a(TypedDict):
    """Result mapping produced by the Burrows-Wheeler transform below.

    Restored: the original inherited the unbound name ``lowerCAmelCase``
    (NameError on import) and both fields had collapsed into ``= 42``
    assignments; `TypedDict` is already imported at the top of this file.
    """

    bwt_string: str
    idx_original_string: int
def lowercase(_SCREAMING_SNAKE_CASE: str) -> list[str]:
    """Return every cyclic rotation of the input string, in rotation order.

    Fixed: the original checked ``isinstance(s, s)`` (TypeError at runtime)
    and built the rotations from the unbound name ``s``.
    """
    if not isinstance(_SCREAMING_SNAKE_CASE, str):
        raise TypeError("The parameter s type must be str.")

    return [_SCREAMING_SNAKE_CASE[i:] + _SCREAMING_SNAKE_CASE[:i] for i in range(len(_SCREAMING_SNAKE_CASE))]
def lowercase(_SCREAMING_SNAKE_CASE: str):
    """Apply the Burrows-Wheeler transform to the input string.

    Returns a dict with ``bwt_string`` (last column of the sorted rotation
    matrix) and ``idx_original_string`` (row index of the original string).

    Fixed: the original checked ``isinstance(s, s)``, read the unbound names
    ``s``/``all_rotations`` (the rotations helper in this module is also
    named ``lowercase``), and bound its results to a throwaway local.
    """
    if not isinstance(_SCREAMING_SNAKE_CASE, str):
        raise TypeError("The parameter s type must be str.")
    if not _SCREAMING_SNAKE_CASE:
        raise ValueError("The parameter s must not be empty.")

    # Compute all cyclic rotations inline (the helper is not importable
    # under the name `all_rotations` in this module).
    rotations = [
        _SCREAMING_SNAKE_CASE[i:] + _SCREAMING_SNAKE_CASE[:i] for i in range(len(_SCREAMING_SNAKE_CASE))
    ]
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(_SCREAMING_SNAKE_CASE),
    }
    return response
def lowercase(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform.

    Rebuilds the sorted rotation matrix column by column, then returns the
    row at *idx_original_string* (which may be anything castable to int).

    Fixed: the original signature reused one parameter name twice (a
    SyntaxError) and bound its intermediates to a throwaway local.
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # Prepend the BWT column and re-sort: after n passes every row holds
        # a full rotation in sorted order.
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: read a string, show its BWT and the inverse transform.
    # NOTE(review): the values below are bound to `__A`, but the f-strings and
    # calls reference `entry_msg`/`s`/`result`/`original_string` and
    # `bwt_transform`/`reverse_bwt`, none of which exist under those names in
    # this module (both transforms above are named `lowercase`) — running this
    # script raises NameError; the bindings need fixing.
    __A : Union[str, Any] = "Provide a string that I will generate its BWT transform: "
    __A : Optional[Any] = input(entry_msg).strip()
    __A : int = bwt_transform(s)
    print(
        f'''Burrows Wheeler transform for string \'{s}\' results '''
        f'''in \'{result["bwt_string"]}\''''
    )
    __A : List[str] = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
        f'''we get original string \'{original_string}\''''
    )
| 602 |
"""simple docstring"""
# Public API of the `features` subpackage. The list must be named `__all__`
# (the original bound it to `__A`, which has no effect on star-imports), and
# the imports below must match the names it declares.
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 602 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    R"\n        top_k (`int`, defaults to 5):\n            The number of predictions to return.\n        targets (`str` or `List[str]`, *optional*):\n            When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n            token will be used (with a warning, and that might be slower).\n\n        ",
)
class UpperCamelCase__(Pipeline):
    """Masked-language-modeling (fill-mask) pipeline.

    Restored from the mangled original: every method was named `__snake_case`
    (so only the last survived), several signatures reused one parameter name
    (a SyntaxError), and intermediate results were bound to a throwaway local
    `A__` while later lines read the real names. The base class and decorator
    argument were also unbound; `Pipeline` and `PIPELINE_INIT_ARGS` are
    imported at the top of this file.
    """

    def get_masked_index(self, input_ids: GenericTensor):
        """Return the positions of the mask token in *input_ids*."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        # Keep the input ids around: postprocess needs them to rebuild sequences.
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Resolve user-provided target words to vocabulary ids (np.ndarray)."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                # Fall back to tokenizing the target and taking the first token.
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        # Unwrap the singleton list when a single string input was given.
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
def SCREAMING_SNAKE_CASE() -> str:
    """Return the last ten digits of 1^1 + 2^2 + ... + 1000^1000
    (Project Euler problem 48).

    Fixed: the original returned ``str(__UpperCamelCase)`` (an unbound name)
    instead of the accumulated total, and the main guard called the
    undefined name ``solution``.
    """
    total = 0
    for i in range(1, 10_01):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(SCREAMING_SNAKE_CASE())
def _snake_case(number, position):
    """Return *number* with the bit at *position* set to 1.

    Fixed: the original signature reused one parameter name twice
    (a SyntaxError) while the body read `number`/`position`.
    """
    return number | (1 << position)
def _snake_case(number, position):
    """Return *number* with the bit at *position* cleared to 0.

    Fixed: the original signature reused one parameter name twice
    (a SyntaxError) while the body read `number`/`position`.
    """
    return number & ~(1 << position)
def _snake_case(number, position):
    """Return *number* with the bit at *position* flipped.

    Fixed: the original signature reused one parameter name twice
    (a SyntaxError) while the body read `number`/`position`.
    """
    return number ^ (1 << position)
def _snake_case(number, position):
    """Return True iff the bit at *position* of *number* is set.

    Fixed: the original signature reused one parameter name twice
    (a SyntaxError) while the body read `number`/`position`.
    """
    return ((number >> position) & 1) == 1
def _snake_case(number, position):
    """Return the bit at *position* of *number* as an int (0 or 1).

    Fixed: the original signature reused one parameter name twice
    (a SyntaxError) while the body read `number`/`position`.
    """
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 10 | from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase__(ABC):
    """Abstract base class every CLI sub-command must implement.

    Restored: the original inherited the unbound name `SCREAMING_SNAKE_CASE_`
    (NameError on import; `ABC` is imported at the top of this file) and both
    abstract methods shared the name `_snake_case`, so only the last one
    survived in the class namespace.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach this command's arguments/sub-parser to *parser*."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)  # module-level logger

# Released FNet checkpoints mapped to the URLs of their hosted config files.
SCREAMING_SNAKE_CASE : Optional[int] = {
    """google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
    """google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration class for FNet models.

    Restored: the original inherited the unbound name `__snake_case`
    (`PretrainedConfig` is imported at the top of this file), every `__init__`
    parameter shared the name `a_` (a SyntaxError), and each value was stored
    in a local variable instead of on ``self``, so the config carried no data.
    Parameter order and defaults follow the mangled original's positional
    defaults (32000, 768, 12, 3072, "gelu_new", 0.1, 512, 4, 0.02, 1e-12,
    False, 512, pad=3, bos=1, eos=2).
    """

    model_type = "fnet"

    def __init__(
        self,
        vocab_size=3_20_00,
        hidden_size=7_68,
        num_hidden_layers=12,
        intermediate_size=30_72,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=5_12,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 721 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Compute per-process waiting times, always running the ready process
    with the smallest remaining burst to completion.

    Fixed: the original signature reused one parameter name three times (a
    SyntaxError) and bound `waiting_time`/`remaining_time`/`target_process`
    to a throwaway local while later lines read the real names.
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            # No process is ready yet: advance the clock one unit.
            total_time += 1
    return waiting_time
def lowercase(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Compute turnaround times as burst time plus waiting time per process.

    Fixed: the original signature reused one parameter name three times (a
    SyntaxError) and bound the result list to a throwaway local while
    returning the unbound name `turn_around_time`.
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    # Demo run on a fixed four-process workload.
    print("""[TEST CASE 01]""")
    # NOTE(review): the four values below are all bound to `SCREAMING_SNAKE_CASE`,
    # but subsequent lines reference `arrival_time`/`burst_time`/`no_of_processes`/
    # `waiting_time`/`turn_around_time` and call `calculate_waitingtime`/
    # `calculate_turnaroundtime`, none of which exist under those names in this
    # module (both helpers above are named `lowercase`) — running this script
    # raises NameError; the bindings need fixing.
    SCREAMING_SNAKE_CASE : Dict = 4
    SCREAMING_SNAKE_CASE : List[Any] = [2, 5, 3, 7]
    SCREAMING_SNAKE_CASE : Optional[Any] = [0, 0, 0, 0]
    SCREAMING_SNAKE_CASE : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    SCREAMING_SNAKE_CASE : Any = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
            F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
        )
    print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
    print(F'Average turnaround time = {mean(turn_around_time):.5f}')
| 229 | 0 |
def lowerCAmelCase_(grid, row, col, visit) -> int:
    """Count the 4-directional simple paths from (row, col) to the
    bottom-right cell of *grid*, where 1-cells are blocked and *visit*
    holds the cells already on the current path.

    Fixed: the original signature reused one parameter name four times
    (a SyntaxError), the recursive calls targeted the undefined name
    `depth_first_search`, and intermediates were bound to a throwaway local.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += lowerCAmelCase_(grid, row + 1, col, visit)
    count += lowerCAmelCase_(grid, row - 1, col, visit)
    count += lowerCAmelCase_(grid, row, col + 1, visit)
    count += lowerCAmelCase_(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 59 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCamelCase(model):
    """Return the number of trainable (requires_grad) parameters in *model*.

    Fixed: the original bound the filtered iterator and the sum to a single
    throwaway local while later lines read `model_parameters`/`params`, and
    the lambda's parameter name shadowed the function parameter.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
snake_case_ = logging.getLogger(__name__)  # module-level logger
def __lowerCamelCase(output_dir, metric):
    """Build a ModelCheckpoint callback that tracks ``val_<metric>``.

    Raises NotImplementedError for metrics other than rouge2/bleu/em.

    Fixed: the original signature reused one parameter name twice (a
    SyntaxError), the filename template and callback were bound to a
    throwaway local, and the unbound name `checkpoint_callback` was returned.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def __lowerCamelCase(metric, patience):
    """Build an EarlyStopping callback on ``val_<metric>`` with the given
    patience; loss-like metrics are minimized, everything else maximized.

    Fixed: the original signature reused one parameter name twice
    (a SyntaxError).
    """
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class SCREAMING_SNAKE_CASE__(pl.Callback):
    """Lightning callback that logs learning rates, writes test/validation
    results to disk, and reports parameter counts at train start.

    Restored from the mangled original: every hook was named
    `__lowerCamelCase` (only the last survived), hook signatures reused one
    parameter name (a SyntaxError), and intermediates were bound to a
    throwaway local while later lines read the real names. Hook names follow
    the pl.Callback API so Lightning actually invokes them.

    NOTE(review): `_write_logs` uses the module-level `logger` and
    `on_train_start` calls `count_trainable_parameters`; in this module those
    are currently bound as `snake_case_` and `__lowerCamelCase` respectively —
    the module-level names need fixing as well.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer param group.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 421 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def UpperCAmelCase(lowercase__: int):
    """Estimate pi by uniformly sampling *lowercase__* points in the square
    [-1, 1] x [-1, 1] and measuring the fraction inside the unit circle,
    printing the estimate and its error.

    Fixed: the inner helper's signature reused one parameter name twice (a
    SyntaxError) and `distance_from_centre`/`proportion`/`pi_estimate` were
    bound to a throwaway local while later lines read the real names.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(lowercase__)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')
def UpperCAmelCase(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
):
    """Monte-Carlo estimate of the integral of *function_to_integrate* over
    [min_value, max_value]: the mean of the function at uniformly random
    points, scaled by the interval width.

    Fixed: the original signature reused one parameter name four times
    (a SyntaxError) while the body read the real names.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def UpperCAmelCase(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    """Sanity-check the curve estimator on y = x, whose exact integral over
    [min_value, max_value] is (max^2 - min^2) / 2; prints both values.

    Fixed: the original signature reused one parameter name three times (a
    SyntaxError) and the inner identity function returned the unbound name
    ``x``.

    NOTE(review): this calls `area_under_curve_estimator`, but the estimator
    above is also named `UpperCAmelCase` in this module — confirm the
    intended function names module-wide.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print("******************")
def UpperCAmelCase(iterations: int):
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2)
    over [0, 2]; prints the estimate and its error.

    Fixed: the inner function returned an expression over the unbound name
    ``x`` and the estimate was bound to a throwaway local.

    NOTE(review): this calls `area_under_curve_estimator`, but the estimator
    above is also named `UpperCAmelCase` in this module — confirm the
    intended function names module-wide.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print("******************")
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 412 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
# Module-level logger; the Trainer subclass below logs through this name.
logger = logging.getLogger(__name__)

# Import the torch_xla helpers only when a TPU runtime is actually available.
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class lowerCAmelCase_(Trainer):
    """Question-answering ``Trainer`` with quantization-calibration and ONNX-export support.

    Extends the base ``Trainer`` with:
      * ``get_calib_dataloader`` / ``calibrate`` — run post-training-quantization
        calibration through the ``quant_trainer`` helpers,
      * ``evaluate`` / ``predict`` — post-process raw logits into QA answers
        (via ``post_process_function``) before computing metrics,
      * ``save_onnx`` — export the (quantized) model to ONNX.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        # Forward the standard Trainer arguments untouched; keep the QA/quantization
        # extras on the instance.
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Return a shuffled ``DataLoader`` over the calibration dataset.

        Falls back to ``self.calib_dataset`` when no dataset is passed; raises
        ``ValueError`` when neither is available.
        """
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        """Run quantization calibration over roughly ``self.calib_num`` samples.

        Uses the training dataset when no explicit calibration dataset is given.
        """
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f" Num examples = {self.calib_num}")
        logger.info(f" Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step (results discarded; only the calibration hooks matter).
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            # Stop once enough samples have been pushed through the model.
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        """Evaluate with QA post-processing.

        Metric computation is disabled inside the evaluation loop and applied
        afterwards on the post-processed predictions.
        """
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            # Restore the metric function even if the loop raised.
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test"):
        """Predict with QA post-processing; returns a ``PredictionOutput``.

        When no post-processing or metric function is configured, the raw
        evaluation-loop output is returned unchanged.
        """
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (quantized) model to ``output_dir``/model.onnx.

        One evaluation batch is used as the example input for tracing.
        """
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        # Use fake quantization ops that the ONNX exporter understands.
        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
| 412 | 1 |
from sklearn.metrics import recall_score
import datasets
# Metric documentation strings; the names below are referenced by the
# `add_start_docstrings` decorator and `MetricInfo` of the metric class.
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'

_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCAmelCase(datasets.Metric):
    """Recall metric backed by ``sklearn.metrics.recall_score``."""

    def _info(self):
        # Metric metadata; the feature schema depends on whether the metric is
        # instantiated for multilabel inputs (sequences) or single labels.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32')),
                    'references': datasets.Sequence(datasets.Value('int32')),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }
            ),
            reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        # sklearn's signature is (y_true, y_pred): references first.
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        # `average=None` yields a per-class array; only collapse to float for scalars.
        return {"recall": float(score) if score.size == 1 else score}
| 53 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Module state: a lock guarding handler setup, the (lazily created) default
# stream handler, the supported verbosity names, and the tqdm toggle.
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """Return the default level, honoring the TRANSFORMERS_VERBOSITY env var.

    Falls back to ``_default_log_level`` when the variable is unset or invalid.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
                f'has to be one of: { ", ".join(log_levels.keys()) }'
            )
    return _default_log_level


def _get_library_name() -> str:
    """Return the top-level package name of this module."""
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    """Return the root logger of the library."""
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    """Attach the default stream handler to the library root logger (once)."""
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    """Detach the default handler and reset the library root logger level."""
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    """Return the mapping of verbosity names to logging levels."""
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the given name, configuring the root logger first."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current effective level of the library root logger."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set verbosity to INFO."""
    return set_verbosity(logging.INFO)


def set_verbosity_warning():
    """Set verbosity to WARNING."""
    return set_verbosity(logging.WARNING)


def set_verbosity_debug():
    """Set verbosity to DEBUG."""
    return set_verbosity(logging.DEBUG)


def set_verbosity_error():
    """Set verbosity to ERROR."""
    return set_verbosity(logging.ERROR)


def disable_default_handler() -> None:
    """Disable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library root logger."""
    _configure_library_root_logger()

    # The handler must currently be attached for removal to make sense.
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Let library log records propagate to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Apply an explicit ``[LEVEL|file:line] time >> message`` format to all handlers."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset all handlers of the library root logger to the default format."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like ``Logger.warning``, but silenced by TRANSFORMERS_NO_ADVISORY_WARNINGS."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a warning only once per unique (logger, message) combination.

    The lru_cache keys on the call arguments, so repeated identical calls are no-ops.
    """
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class UpperCamelCase_:
    """Dummy tqdm that does nothing (used when progress bars are disabled)."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty no-op function for any tqdm method."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


# Keep the original (obfuscated) public name as an alias of the dummy tqdm.
EmptyTqdm = UpperCamelCase_


class _tqdm_cls:
    """Dispatch to real tqdm when progress bars are enabled, else to EmptyTqdm."""

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return whether tqdm progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 664 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Script-level logger; `main()` below configures and logs through this name.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of ``max_length`` seconds from the input audio.

    :param wav: 1-D waveform array
    :param max_length: maximum clip length in seconds
    :param sample_rate: samples per second of the waveform
    :return: the input unchanged when it is already short enough, otherwise a
        random contiguous slice of ``round(sample_rate * max_length)`` samples
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature extractor we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    # Deprecated alias of freeze_feature_encoder; see __post_init__.
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        # Keep backward compatibility between the deprecated
        # `freeze_feature_extractor` flag and `freeze_feature_encoder`.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ : Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , UpperCAmelCase_ , UpperCAmelCase_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : List[str] =training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase_ )
transformers.utils.logging.set_verbosity(UpperCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE_ : Dict =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE_ : Dict =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
SCREAMING_SNAKE_CASE_ : str =DatasetDict()
SCREAMING_SNAKE_CASE_ : Dict =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ : Dict =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--label_column_name` to the correct text column - one of '''
f'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
SCREAMING_SNAKE_CASE_ : Dict =AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
SCREAMING_SNAKE_CASE_ : Dict =raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
SCREAMING_SNAKE_CASE_ : List[str] =feature_extractor.model_input_names[0]
def train_transforms(UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE_ : int =[]
for audio in batch[data_args.audio_column_name]:
SCREAMING_SNAKE_CASE_ : List[str] =random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : str =feature_extractor(UpperCAmelCase_ , sampling_rate=feature_extractor.sampling_rate )
SCREAMING_SNAKE_CASE_ : List[Any] ={model_input_name: inputs.get(UpperCAmelCase_ )}
SCREAMING_SNAKE_CASE_ : str =list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE_ : List[Any] =[audio['''array'''] for audio in batch[data_args.audio_column_name]]
SCREAMING_SNAKE_CASE_ : List[Any] =feature_extractor(UpperCAmelCase_ , sampling_rate=feature_extractor.sampling_rate )
SCREAMING_SNAKE_CASE_ : int ={model_input_name: inputs.get(UpperCAmelCase_ )}
SCREAMING_SNAKE_CASE_ : List[Any] =list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE_ : Optional[Any] =raw_datasets['''train'''].features[data_args.label_column_name].names
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] ={}, {}
for i, label in enumerate(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] =str(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] =label
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE_ : Union[str, Any] =evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE_ : Tuple =np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=UpperCAmelCase_ , references=eval_pred.label_ids )
SCREAMING_SNAKE_CASE_ : Optional[int] =AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(UpperCAmelCase_ ) , labelaid=UpperCAmelCase_ , idalabel=UpperCAmelCase_ , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ : int =AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] =(
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(UpperCAmelCase_ , output_all_columns=UpperCAmelCase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE_ : str =(
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(UpperCAmelCase_ , output_all_columns=UpperCAmelCase_ )
# Initialize our trainer
SCREAMING_SNAKE_CASE_ : Optional[int] =Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE_ : Optional[int] =None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE_ : int =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE_ : List[str] =last_checkpoint
SCREAMING_SNAKE_CASE_ : str =trainer.train(resume_from_checkpoint=UpperCAmelCase_ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE_ : str =trainer.evaluate()
trainer.log_metrics('''eval''' , UpperCAmelCase_ )
trainer.save_metrics('''eval''' , UpperCAmelCase_ )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE_ : Union[str, Any] ={
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase_ )
else:
trainer.create_model_card(**UpperCAmelCase_ )
if __name__ == "__main__":
    # Standard CLI entry point: run the audio-classification fine-tuning defined in main() above.
    main()
| 431 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase_(DiffusionPipeline):
    """
    Unconditional image generation with the stochastic sampler of Karras et al. (2022),
    tailored to variance-expanding (VE) models. Follows Algorithm 2 / Table 1 of the paper.
    """

    # Components registered via `register_modules` in `__init__`.
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate `batch_size` images; returns ImagePipelineOutput (or a tuple if not return_dict)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] model space to [0, 1] image space, then to HWC numpy.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 431 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case = logging.get_logger(__name__)
snake_case = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __A(BackboneConfigMixin, PretrainedConfig):
    """Configuration for BiT (Big Transfer) backbones: stores architecture hyper-parameters."""

    model_type = "bit"
    # Valid values for the `layer_type` / `global_padding` constructor arguments.
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],  # mutable defaults kept for interface compatibility
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # Stage names drive which backbone features can be exposed via out_features/out_indices.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
snake_case = False
snake_case = logging.get_logger(__name__)
snake_case = "ybelkada/fonts"
def _check_torch_version():
    """Raise ImportError if torch is installed but older than 1.11.0 (required by this processor)."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """
    Extract non-overlapping patches from an image tensor.

    Args:
        image_tensor: torch.Tensor of shape (channels, height, width).
        patch_height: patch height in pixels.
        patch_width: patch width in pixels.

    Returns:
        torch.Tensor of shape (1, rows, columns, patch_height * patch_width * channels).
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render `text` (wrapped at 80 chars) onto a new RGB PIL image and return it."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        # Default font repo on the Hub (same value as the module-level constant above).
        font = hf_hub_download("ybelkada/fonts", "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Render `header` text above `image` and return the stacked result as a numpy array."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    # Scale both parts to the common width, preserving each aspect ratio.
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class __A(BaseImageProcessor):
    """
    Pix2Struct image processor: converts images into sequences of flattened patches
    ("flattened_patches") with row/column ids, plus an attention mask.
    """

    model_input_names = ["flattened_patches"]

    def __init__(self, do_convert_rgb=True, do_normalize=True, patch_size=None, max_patches=2048, is_vqa=False, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs):
        """Resize `image` to a feasible grid, cut it into patches, and pad to `max_patches`."""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image holds as many whole patches as allowed
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image, data_format=None, **kwargs):
        """Normalize to zero mean / unit std computed over the whole image (Pix2Struct style)."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        # Guard against near-zero std on (almost) constant images.
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images,
        header_text=None,
        do_convert_rgb=None,
        do_normalize=None,
        max_patches=None,
        patch_size=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more images into a BatchFeature of flattened patches + attention masks."""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy (a patch row of all zeros is padding)
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
| 424 | 1 |
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Sort `input_list` in place with odd-even transposition (brick) sort and return it.

    Fixes the obfuscated swap, which assigned both values to a throwaway local and
    therefore never mutated the list (infinite loop for unsorted input); also restores
    the name used by the __main__ block below.
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    # Read one line of whitespace-separated integers; the obfuscated version assigned
    # the parsed list and the sorted result to throwaway names, leaving `input_list`
    # and `sorted_list` undefined.
    input_list = [int(x) for x in input().split()]
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 696 |
"""simple docstring"""
def factorial(num: int) -> int:
    """Return num! (factorial of `num`; 0! == 1)."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Project Euler 20: return the sum of the digits of num! (default 100!).

    The three functions previously all shared one obfuscated name, shadowing each
    other, while this body called `factorial`/`split_and_add` — restored here.
    """
    nfact = factorial(num)
    return split_and_add(nfact)
if __name__ == "__main__":
    # Prompt for N and print the digit sum of N! .
    print(solution(int(input("Enter the Number: ").strip())))
| 696 | 1 |
'''Twin-prime helper: returns number + 2 when both number and number + 2 are prime.'''
from maths.prime_check import is_prime
def A(number: int) -> int:
    """Return the twin prime of `number` (number + 2) if both are prime, else -1.

    Raises:
        TypeError: if `number` is not an integer.
    """
    if not isinstance(number, int):
        # Obfuscation had `isinstance(x, x)` and referenced an undefined `number`.
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """
    Builds MobileBERT configs and dummy inputs, runs each model head on them, and
    asserts the expected output shapes for the test case in `parent`.
    (Renamed to match the `MobileBertModelTester(self)` call in the test class below;
    duplicate-parameter SyntaxErrors and the all-`__A` method shadowing are fixed.)
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        # The label count must reach the head via the config.
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice: (batch, seq) -> (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """MobileBERT model/pipeline test suite (shape checks with randomly initialized models)."""

    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy label tensors when requested (pretraining heads need two labels)."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def a__ ( __UpperCamelCase ):
return torch.tensor(
__UpperCamelCase , dtype=torch.long , device=__UpperCamelCase , )
A : List[str] = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
    """Integration test: run google/mobilebert-uncased on a fixed input and bound-check a logits slice."""

    @slow
    def test_inference_no_head(self):
        # NOTE(review): `torch_device` is expected from transformers.testing_utils
        # (imported at the top of the original test module) — the mangled original
        # referenced an undefined name here. Confirm the import exists file-side.
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
| 140 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the ViViT sub-package: maps submodule name -> public names.
# The original bound this dict (and the optional-dependency lists below) to throwaway
# placeholders, so the `_import_structure` passed to _LazyModule was undefined.
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Vision extras missing: simply skip registering the image processor.
    pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: skip registering the modeling objects.
    pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; the original assigned the proxy to a
    # throwaway name instead of installing it in sys.modules, so laziness never applied.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 707 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQ model's ``encode`` method.

    The encode method below constructs ``VQEncoderOutput(latents=...)``, so this
    class must carry that name and a ``latents`` field (the original declared a
    placeholder name with a meaningless ``42`` field). Base class restored to
    ``BaseOutput`` (imported above and otherwise unused).
    """

    # Pre-quantization encoder output (post quant_conv).
    latents: torch.FloatTensor
class lowerCamelCase ( ModelMixin , ConfigMixin ):
    """VQ-VAE style model: Encoder -> vector quantizer -> Decoder.

    NOTE(review): parameter names, attribute names and method names were
    reconstructed from the method bodies (``self.encoder``/``self.quant_conv``/
    ``self.quantize``/``self.post_quant_conv``/``self.decoder`` and the
    ``self.encode``/``self.decode`` calls) — the original had duplicate parameter
    names (a SyntaxError) and used the nonexistent ``nn.Convad`` instead of
    ``nn.Conv2d``. Base classes restored from the file's imports.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 2_56,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ) -> None:
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,  # VQ latents are not split into mean/logvar
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        # 1x1 convs project in/out of the quantizer's embedding space.
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode ``x`` into (unquantized) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize (unless ``force_not_quantize``) and decode latents back to sample space."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # Spatial norm decoders additionally condition on the quantized latents.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full round trip: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 687 | 0 |
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> float:
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__snake_case ) * abs(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 108 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger — the config class below calls `logger.info`, but the original
# bound the logger (and the archive map) to the placeholder name `lowercase`.
logger = logging.get_logger(__name__)

# NOTE(review): map name reconstructed from the transformers naming convention — confirm.
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class UpperCamelCase_ ( PretrainedConfig ):
    """Configuration class for Transformer-XL.

    NOTE(review): every ``__init__`` parameter was named ``a`` in the original (a
    SyntaxError) and the attribute assignments were lost into a throwaway local,
    so e.g. ``self.cutoffs.extend`` raised. Names/defaults restored against the
    well-known upstream TransfoXLConfig signature — confirm.
    """

    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    # Maps the generic config attribute names onto the Transformer-XL specific ones.
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=26_77_35,
        cutoffs=[2_00_00, 4_00_00, 20_00_00],  # copied via extend below, so the shared default is safe
        d_model=10_24,
        d_embed=10_24,
        n_head=16,
        d_head=64,
        d_inner=40_96,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=16_00,
        clamp_len=10_00,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1E-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Share all adaptive-softmax projections except the first cluster's.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self) -> int:
        # Message copied from Transformer-XL documentation
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 198 | 0 |
def fibonacci(n: int) -> int:
    """Return the n-th term of this module's Fibonacci numbering (fib(2) == 1).

    Mirrors the original behaviour exactly: n == 1 or any non-int input yields 0.
    (All three functions in this script were defined under the same mangled name
    while being called as fibonacci/fibonacci_digits_index/solution; names
    restored from those call sites.)
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first term (in the numbering above) with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25 entry point: index of the first Fibonacci term with n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 707 |
def longest_common_subsequence(x: str, y: str):
    """Return (length, subsequence): the longest common subsequence of x and y.

    Bottom-up DP table plus a backtrack to recover one optimal subsequence.
    The function was defined under a mangled name while the ``__main__`` block
    calls ``longest_common_subsequence`` — name restored from that call site.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # l[i][j] = LCS length of x[:i] and y[:j]
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # Backtrack from the bottom-right corner to reconstruct one LCS.
    seq = """"""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = """AGGTAB"""
    b = """GXTXAYB"""
    expected_ln = 4
    expected_subseq = """GTAB"""
    ln, subseq = longest_common_subsequence(a, b)
    print("""len =""", ln, """, sub-sequence =""", subseq)
    import doctest

    doctest.testmod()
| 446 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
# The four constants below were all bound to the same placeholder name; the
# tokenizer class body references them as VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_INIT_CONFIGURATION /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, so those names are restored here.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Checkpoint name -> vocab/tokenizer file URLs.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 5_12,
    "google/electra-base-generator": 5_12,
    "google/electra-large-generator": 5_12,
    "google/electra-small-discriminator": 5_12,
    "google/electra-base-discriminator": 5_12,
    "google/electra-large-discriminator": 5_12,
}

# Default tokenizer kwargs per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}
class lowerCAmelCase_ ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) ELECTRA tokenizer, WordPiece based.

    NOTE(review): the base class (``PreTrainedTokenizerFast``, imported above and
    otherwise unused), the class-attribute names, the ``__init__`` parameter names
    and the method names were reconstructed from the fast-tokenizer base API —
    the original had duplicate parameter names (a SyntaxError) and dropped the
    normalizer-state dict writes into throwaway locals. Confirm against upstream
    ElectraTokenizerFast.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the requested lowercase / accent / CJK settings.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return [CLS] A [SEP] (and, for pairs, ... B [SEP])."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 18 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=DummyObject ):
    """Placeholder object raising a helpful ImportError when transformers/torch/note_seq are missing.

    NOTE(review): the original used an undefined ``SCREAMING_SNAKE_CASE`` metaclass;
    ``DummyObject`` (imported above and otherwise unused) is the intended one, and
    ``_backends`` is the attribute that metaclass reads — confirm against the
    diffusers dummy-object pattern.
    """

    _backends = ["""transformers""", """torch""", """note_seq"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''transformers''', '''torch''', '''note_seq'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])
| 347 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds RoFormer configs and dummy inputs for the Flax common tests.

    Renamed from a mangled placeholder: the test class below instantiates
    ``FlaxRoFormerModelTester(self)``. Every constructor parameter is stored on
    ``self`` under the same name (the original routed all assignments into one
    throwaway local, so ``self.batch_size`` etc. were never set).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with random contents."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): original passed an undefined name; upstream uses False
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() into (config, inputs_dict) for the common mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class lowercase__ ( FlaxModelTesterMixin, unittest.TestCase ):
    """Flax common-test suite for RoFormer.

    NOTE(review): the mixin base (``FlaxModelTesterMixin``, imported above and
    otherwise unused), the attribute names and the test-method names were
    reconstructed; the original declared two class attributes under the same
    placeholder name, so the first (``test_head_masking``?) was silently lost —
    confirm against the upstream Flax RoFormer test module.
    """

    test_head_masking = True
    # Model classes iterated by the common tests and by test_model_from_pretrained.
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class lowercase__ ( unittest.TestCase ):
    """Integration test: masked-LM logits of roformer_chinese_base on a fixed input."""

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # First three positions x first three vocab logits of the reference output.
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 24 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowercase = logging.get_logger(__name__)
lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into one.

    NOTE(review): all parameters were named ``A`` in the original (a SyntaxError)
    and the type check compared ``isinstance(A, A)``; names restored from the
    error-message uses of ``dataset_type``/``other_type`` and the upstream
    `datasets` API — confirm the public name.

    Raises:
        ValueError: on an empty list, mixed dataset kinds, dataset dicts, or an
            unknown stopping_strategy.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']' )
            raise ValueError(
                f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.' )
        if i == 0:
            # The first element fixes the expected type; the other kind is kept for the error message.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several map-style or iterable datasets along ``axis``.

    NOTE(review): the original duplicated the parameter name ``A`` (a SyntaxError);
    ``dsets`` is grounded by the first check below, the rest by the upstream
    `datasets` API. The per-element error messages mention "interleave" — they
    were shared with interleave_datasets upstream and are kept byte-identical.

    Raises:
        ValueError: on an empty list, mixed dataset kinds, or dataset dicts.
    """
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']' )
            raise ValueError(
                f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.' )
        if i == 0:
            # The first element fixes the expected type; the other kind is kept for the error message.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis )
| 24 | 1 |
import argparse
import os
import re
import packaging.version
# Root folder scanned by update_version_in_examples.
PATH_TO_EXAMPLES = """examples/"""
# pattern-name -> (compiled regex matching the versioned line, replacement template
# with the literal "VERSION" placeholder). Read by update_version_in_file below;
# the originals were bound to throwaway placeholder names.
REPLACE_PATTERNS = {
    """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
    """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
    """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
    """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
# pattern-name -> file updated on every release.
REPLACE_FILES = {
    """init""": """src/transformers/__init__.py""",
    """setup""": """setup.py""",
}
# README whose model list is cleaned after a release.
README_FILE = """README.md"""
def update_version_in_file(fname, version, pattern):
    """Rewrite the versioned line in `fname` using the regex registered for `pattern`.

    All seven functions in this script were defined under one mangled name while
    their call sites below use the real names (update_version_in_file, get_version,
    ...); names restored from those call sites.
    """
    with open(fname, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''', version)
    code = re_pattern.sub(replace, code)
    with open(fname, '''w''', encoding='''utf-8''', newline='''\n''') as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the check_min_version pin in every maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''')
        if "legacy" in directories:
            directories.remove('''legacy''')
        for fname in fnames:
            if fname.endswith('''.py'''):
                update_version_in_file(os.path.join(folder, fname), version, pattern='''examples''')


def global_version_update(version, patch=False):
    """Update the version everywhere it is pinned (examples are skipped for patch releases)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Point the README model-list doc links at the stable docs instead of `main`."""
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('''1.'''):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''',
                '''https://huggingface.co/docs/transformers/model_doc''',
            )
        index += 1

    with open(README_FILE, '''w''', encoding='''utf-8''', newline='''\n''') as f:
        f.writelines(lines)


def get_version():
    """Read the current version out of the package __init__."""
    with open(REPLACE_FILES['''init'''], '''r''') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Interactively pick the release version and apply it everywhere."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''

    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version
    print(F'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''')
        clean_main_ref_in_model_list()


def post_release_work():
    """Interactively pick the next dev version and apply it everywhere."""
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''')
    if len(version) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''')
    global_version_update(version)
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''')
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
    parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("""Nothing to do after a patch :-)""")
    else:
        post_release_work()
| 2 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# NOTE(review): the repeated placeholder name means the try-branch assignment
# shadows the False default; upstream accelerate names this flag `in_colab` —
# confirm before relying on it elsewhere.
SCREAMING_SNAKE_CASE : Any = False  # default: assume google.colab is unavailable
try:
    # _is_package_available probes for google.colab; a missing optional module
    # is deliberately swallowed so the default above is kept.
    SCREAMING_SNAKE_CASE : List[Any] = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class UpperCamelCase :
    """Interactive terminal bullet menu.

    Renders ``choices`` one per line with an arrow marker on the current row;
    the user moves with the arrow or digit keys and confirms with enter.
    Key handlers are registered through the ``input`` helper's ``mark`` /
    ``mark_multiple`` decorators; ``handle_input`` and ``current_selection``
    are supplied by that framework via ``@input.register``.

    Fixes: every method was previously bound to the same garbled name, so only
    the last survived while bodies called ``self.write_choice`` /
    ``self.print_choice`` / ``self.move_direction``; two signatures repeated a
    parameter name (a SyntaxError); and the digit-key decorator referenced an
    undefined name instead of the comprehension variable ``number``.
    """

    def __init__(self, prompt=None, choices=[]):
        # NOTE(review): the mutable default for ``choices`` is kept for
        # interface compatibility; callers should pass their own list.
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '

    def write_choice(self, index, end=""):
        """Write the choice at ``index`` (colored green on non-Windows)."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index):
        """Print one menu row, with the arrow marker on the current row."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction, num_spaces=1):
        """Move the highlight ``num_spaces`` rows up or down, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        """Confirm the current row: move below the menu and return its index."""
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        """Ctrl-C handler: leave the menu area, then propagate the interrupt."""
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to a row when a digit key is pressed."""
        # ``current_selection`` is set by the input framework — presumably the
        # raw key code of the pressed digit; TODO confirm against the keymap.
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice=0):
        """Draw the menu, loop on key presses, and return the chosen index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab has no raw-key support: fall back to typed indices.
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    # Erase the menu and echo only the final selection.
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
| 257 | 0 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 51 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
    '''Unit tests for the RoCBert tokenizer (a shape- and pronunciation-aware BERT variant).

    Covers the full tokenizer, the basic (pre-wordpiece) tokenizer, the
    wordpiece tokenizer, the character-classification helpers, offset
    mappings, Chinese-character handling, special-token building, and
    prepare_for_model/encode_plus equivalence.

    NOTE(review): several names in this file look machine-garbled — the mixin
    base is referenced as ``_UpperCamelCase`` (the import above provides
    ``TokenizerTesterMixin``), and every local is bound to ``_UpperCamelCase``
    while later statements read other names. The spots where this would raise
    at runtime are flagged inline; confirm against the upstream test file.
    '''
    SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
    SCREAMING_SNAKE_CASE__ :Dict = None
    SCREAMING_SNAKE_CASE__ :List[Any] = False
    SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
    SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
    # Build a tiny vocab plus word-shape / word-pronunciation JSON files on disk.
    # NOTE(review): the loop below appears to have lost its dict writes (both
    # assignments bind a bare local instead of ``word_shape[value]`` /
    # ``word_pronunciation[value]``), leaving both mappings empty — verify.
    def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
        super().setUp()
        _UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        _UpperCamelCase : List[str] = {}
        _UpperCamelCase : Tuple = {}
        for i, value in enumerate(__a ):
            _UpperCamelCase : List[str] = i
            _UpperCamelCase : Optional[Any] = i
        _UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        _UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
        _UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
            json.dump(__a , __a , ensure_ascii=__a )
        with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
            json.dump(__a , __a , ensure_ascii=__a )
    # Full tokenizer: tokens and all three id channels (vocab/shape/pronunciation).
    def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
        _UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        _UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
        self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
    # Basic tokenizer: default behavior on mixed ASCII/CJK input.
    def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
        _UpperCamelCase : Dict = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
    # Basic tokenizer: lower-casing (accents stripped by default when lowering).
    def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
        _UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    # Lower-casing with strip_accents explicitly disabled.
    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
        _UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
    # Lower-casing with strip_accents explicitly enabled.
    def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        _UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    # Lower-casing default accent behavior.
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        _UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    # Case-preserving mode.
    def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        _UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    # Case-preserving mode, strip_accents disabled.
    def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
        _UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
    # Case-preserving mode, strip_accents enabled.
    def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
        _UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
    # ``never_split`` protects listed tokens from being split.
    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        _UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
    # Wordpiece tokenizer: greedy longest-match-first with [UNK] fallback.
    def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
        _UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        _UpperCamelCase : Any = {}
        for i, token in enumerate(__a ):
            _UpperCamelCase : str = i
        _UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
    # Character-classification helpers.
    def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )
        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )
    def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
        self.assertTrue(_is_control("\u0005" ) )
        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )
    def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )
        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )
    # Soft-hyphen handling (tokenizers issue #340).
    # NOTE(review): the comprehensions below tokenize ``__a`` while the loop
    # variable is ``t`` — this will NameError; upstream tokenizes ``t``.
    def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
        _UpperCamelCase : Optional[Any] = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(__a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        if self.test_rust_tokenizer:
            _UpperCamelCase : Tuple = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(__a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
    # Offset mappings returned by the fast tokenizer, cased and uncased.
    def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
                _UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                _UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
                    __a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
                _UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
                _UpperCamelCase : Dict = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    # Chinese characters must not receive the "##" continuation prefix when
    # ``tokenize_chinese_chars`` is on; only the first keeps no prefix when off.
    def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
        _UpperCamelCase : Optional[Any] = ["的", "人", "有"]
        _UpperCamelCase : int = "".join(__a )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _UpperCamelCase : int = True
                _UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
                _UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
                _UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
                _UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
                _UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
                _UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(__a , __a )
                self.assertListEqual(__a , __a )
                _UpperCamelCase : Any = False
                _UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
                _UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
                _UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
                _UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
                _UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
                _UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
                # it is expected that only the first Chinese character is not preceded by "##".
                _UpperCamelCase : Any = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
                ]
                self.assertListEqual(__a , __a )
                self.assertListEqual(__a , __a )
    # [CLS]/[SEP] placement for single sentences and pairs.
    @slow
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
        _UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        _UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
        _UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
        _UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
        _UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    # prepare_for_model must match encode_plus for the same input.
    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        _UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                _UpperCamelCase : int = "你好,你是谁"
                _UpperCamelCase : Any = tokenizer.tokenize(__a )
                _UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
                _UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
                _UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
                _UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
                    __a , __a , __a , add_special_tokens=__a )
                _UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
                self.assertEqual(__a , __a )
| 51 | 1 |
'''simple docstring'''
def UpperCamelCase ( a : int , b : int ) -> int:
    '''Return the greatest common divisor of ``a`` and ``b`` (Euclid's algorithm).

    The result is always non-negative; ``gcd(0, b)`` is ``abs(b)``.

    Fixes: the previous signature declared the same parameter name twice (a
    SyntaxError) and the body referenced undefined names, including a
    recursive call to ``greatest_common_divisor`` which does not exist here.
    The Euclidean recursion ``gcd(a, b) = gcd(b % a, a)`` is expressed as a
    loop so the function does not rely on its (shared) module-level name.
    '''
    while a:
        a, b = b % a, a
    return abs(b)
def UpperCamelCase ( x : int , y : int ) -> int:
    '''Return the greatest common divisor of ``x`` and ``y`` iteratively.

    Classic Euclidean loop: when ``y`` reaches 0 the accumulated ``x`` is the
    GCD; ``abs`` makes the result non-negative for negative inputs.

    Fixes: the previous signature declared the same parameter name twice (a
    SyntaxError) and the loop body bound a throwaway local instead of
    updating ``x``/``y``.
    '''
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
def UpperCamelCase ( ) -> Union[str, Any]:
    '''Interactive driver: read two comma-separated integers and print both GCDs.

    NOTE(review): several names here look machine-garbled — the input is
    bound to a bare ``lowercase`` local but the next lines read ``nums``,
    the prints read ``num_a`` (twice, so the second operand is lost) and call
    ``greatest_common_divisor``/``gcd_by_iterative`` which are not defined in
    this file under those names. As written this raises NameError, which the
    broad except below then reports as "Wrong input" — confirm upstream.
    '''
    try:
        lowercase =input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
        lowercase =int(nums[0] )
        lowercase =int(nums[1] )
        print(
            f'greatest_common_divisor({num_a}, {num_a}) = '
            f'{greatest_common_divisor(lowercase_ , lowercase_ )}' )
        print(f'By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(lowercase_ , lowercase_ )}' )
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''' )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this file (the driver above is
    # bound to ``UpperCamelCase``); this call will NameError — confirm.
    main()
| 72 | import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
# Module logger.
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
# Canonical on-disk file names for the three Jukebox vocabularies.
lowerCamelCase_ : Optional[int] = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}
# Hub URLs for each pretrained checkpoint's vocabulary files.
lowerCamelCase_ : Union[str, Any] = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}
# Maximum number of lyric tokens per checkpoint.
lowerCamelCase_ : List[Any] = {
    """jukebox""": 512,
}
class a__ ( __snake_case ):
    """Tokenizer for the Jukebox music-generation model.

    Maps artist names, genre lists and lyrics text to id sequences using three
    JSON vocabularies (artists / genres / lyrics), normalizing names and
    filtering out-of-vocabulary lyric characters per model version.

    NOTE(review): most method signatures below repeat the parameter name
    ``UpperCAmelCase`` — duplicate argument names are a SyntaxError in
    Python, so this class cannot be imported as written; the parameter names
    look machine-garbled and need to be restored from upstream. Inline
    references such as ``list_artists``/``list_genres``/``list_lyrics`` are
    presumably those lost parameters — verify before relying on any of them.
    """
    # Class-level tokenizer configuration tables.
    A__ : Dict = VOCAB_FILES_NAMES
    A__ : int = PRETRAINED_VOCAB_FILES_MAP
    A__ : str = PRETRAINED_LYRIC_TOKENS_SIZES
    A__ : List[Any] = ['input_ids', 'attention_mask']
    # Load the three JSON vocabularies and build forward/inverse lookup maps.
    # NOTE(review): mutable default ``["v3", "v2", "v2"]`` is shared across
    # calls — upstream keeps it, but be aware when mutating ``version``.
    def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=["v3", "v2", "v2"] , UpperCAmelCase=5_1_2 , UpperCAmelCase=5 , UpperCAmelCase="<|endoftext|>" , **UpperCAmelCase , ) -> Dict:
        __a = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
        super().__init__(
            unk_token=UpperCAmelCase , n_genres=UpperCAmelCase , version=UpperCAmelCase , max_n_lyric_tokens=UpperCAmelCase , **UpperCAmelCase , )
        __a = version
        __a = max_n_lyric_tokens
        __a = n_genres
        with open(UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
            __a = json.load(UpperCAmelCase )
        with open(UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
            __a = json.load(UpperCAmelCase )
        with open(UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
            __a = json.load(UpperCAmelCase )
        __a = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 7_9:
            __a = oov.replace(R'\-\'' , R'\-+\'' )
        __a = regex.compile(UpperCAmelCase )
        __a = {v: k for k, v in self.artists_encoder.items()}
        __a = {v: k for k, v in self.genres_encoder.items()}
        __a = {v: k for k, v in self.lyrics_encoder.items()}
    # Total vocabulary size across the three encoders.
    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
    # Tokens -> ids for artists, genres (padded with -1 to n_genres) and lyrics.
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
        __a = [self.artists_encoder.get(UpperCAmelCase , 0 ) for artist in list_artists]
        for genres in range(len(UpperCAmelCase ) ):
            __a = [self.genres_encoder.get(UpperCAmelCase , 0 ) for genre in list_genres[genres]]
            __a = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        __a = [[self.lyrics_encoder.get(UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    # Lyrics are tokenized character by character.
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> List[str]:
        return list(UpperCAmelCase )
    # Normalize then tokenize artist/genre/lyrics in one pass.
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]:
        __a , __a , __a = self.prepare_for_tokenization(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        __a = self._tokenize(UpperCAmelCase )
        return artist, genre, lyrics
    # Version-dependent normalization: v3 lower-cases, v2 appends ".v2" and
    # restricts the lyric alphabet; out-of-vocab characters are dropped.
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                __a = artists[idx].lower()
                __a = [genres[idx].lower()]
            else:
                __a = self._normalize(artists[idx] ) + '.v2'
                __a = [
                    self._normalize(UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            __a = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
            __a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
            __a = {vocab[index]: index + 1 for index in range(len(UpperCAmelCase ) )}
            __a = 0
            __a = len(UpperCAmelCase ) + 1
            __a = self.vocab
            __a = {v: k for k, v in self.vocab.items()}
            __a = ''
        else:
            __a = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
        __a = self._run_strip_accents(UpperCAmelCase )
        __a = lyrics.replace('\\' , '\n' )
        __a = self.out_of_vocab.sub('' , UpperCAmelCase ), [], []
        return artists, genres, lyrics
    # Strip combining accent marks via NFD decomposition.
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Optional[Any]:
        __a = unicodedata.normalize('NFD' , UpperCAmelCase )
        __a = []
        for char in text:
            __a = unicodedata.category(UpperCAmelCase )
            if cat == "Mn":
                continue
            output.append(UpperCAmelCase )
        return "".join(UpperCAmelCase )
    # Lower-case alnum + "." are kept; everything else collapses to "_".
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> str:
        __a = (
            [chr(UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
            + [chr(UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
            + [chr(UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
            + ['.']
        )
        __a = frozenset(UpperCAmelCase )
        __a = re.compile(R'_+' )
        __a = ''.join([c if c in accepted else '_' for c in text.lower()] )
        __a = pattern.sub('_' , UpperCAmelCase ).strip('_' )
        return text
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> str:
        return " ".join(UpperCAmelCase )
    # Convert nested python lists to the requested tensor framework.
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> Tuple:
        # Convert to TensorType
        if not isinstance(UpperCAmelCase , UpperCAmelCase ):
            __a = TensorType(UpperCAmelCase )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
            import tensorflow as tf
            __a = tf.constant
            __a = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
            import torch
            __a = torch.tensor
            __a = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
            import jax.numpy as jnp  # noqa: F811
            __a = jnp.array
            __a = _is_jax
        else:
            __a = np.asarray
            __a = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                __a = [inputs]
            if not is_tensor(UpperCAmelCase ):
                __a = as_tensor(UpperCAmelCase )
        except:  # noqa E722
            raise ValueError(
                'Unable to create tensor, you should probably activate truncation and/or padding '
                'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
        return inputs
    # Main entry point: tokenize + convert + pack into a BatchEncoding.
    def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="" , UpperCAmelCase="pt" ) -> BatchEncoding:
        __a = [0, 0, 0]
        __a = [artist] * len(self.version )
        __a = [genres] * len(self.version )
        __a , __a , __a = self.tokenize(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        __a , __a , __a = self._convert_token_to_id(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        __a = [-INFINITY] * len(full_tokens[-1] )
        __a = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCAmelCase )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
    # Persist the three vocabularies next to each other in ``save_directory``.
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
        if not os.path.isdir(UpperCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __a = os.path.join(
            UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
        with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCAmelCase ) )
        __a = os.path.join(
            UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
        with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCAmelCase ) )
        __a = os.path.join(
            UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
        with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCAmelCase ) )
        return (artists_file, genres_file, lyrics_file)
    # Ids -> human-readable artist, genres and lyric characters.
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
        __a = self.artists_decoder.get(UpperCAmelCase )
        __a = [self.genres_decoder.get(UpperCAmelCase ) for genre in genres_index]
        __a = [self.lyrics_decoder.get(UpperCAmelCase ) for character in lyric_index]
        return artist, genres, lyrics
| 559 | 0 |
'''simple docstring'''
def lowerCamelCase__ ( rows , cols , mat ):
    """Largest side of an all-ones square in binary matrix ``mat`` (plain recursion).

    Exponential-time reference implementation: for each cell the largest
    square with its top-left corner there is 1 + min(right, diagonal, down).

    Fixes: the previous signature declared ``a`` three times (a SyntaxError)
    and the recursive calls passed the undefined name ``a`` instead of the
    current row index.
    """
    def update_area_of_max_square(row, col):
        # BASE CASE: outside the matrix contributes nothing.
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            # Square anchored here extends as far as the worst neighbor allows.
            sub_problem_sol = 1 + min(right, diagonal, down)
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        return 0

    # One-element list so the nested function can update the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def lowerCamelCase__ ( rows , cols , mat ):
    """Largest side of an all-ones square in ``mat`` (top-down with memoization).

    Same recurrence as the plain recursive version, but each (row, col)
    subproblem is cached in ``dp_array`` so it is solved only once: O(rows*cols).

    Fixes: the previous signatures (outer and inner) repeated parameter names
    (a SyntaxError) and the recursive calls passed undefined names; the
    zero branch now caches its result as well.
    """
    def update_area_of_max_square_using_dp_array(row, col, dp_array):
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:  # already solved
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min(right, diagonal, down)
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
        else:
            sub_problem_sol = 0
        dp_array[row][col] = sub_problem_sol
        return sub_problem_sol

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def lowerCamelCase__ ( rows , cols , mat ):
    """Largest side of an all-ones square in ``mat`` (bottom-up, full DP table).

    ``dp_array[row][col]`` is the side of the largest square whose top-left
    corner is (row, col); the extra sentinel row/column of zeros removes
    boundary checks. O(rows*cols) time and space.

    Fixes: the previous signature declared ``a`` three times (a SyntaxError)
    and the ``min`` call read the undefined name ``a``.
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def lowerCamelCase__ ( rows , cols , mat ):
    """Largest side of an all-ones square in ``mat`` (bottom-up, two-row DP).

    Space-optimized variant of the full-table DP: only the row being filled
    (``current_row``) and the row below it (``next_row``) are kept.
    O(rows*cols) time, O(cols) space.

    Fixes: the previous signature declared ``a`` three times (a SyntaxError),
    the ``min`` call read an undefined name, and the row hand-off aliased
    ``next_row`` to ``current_row`` so the next pass would read its own
    partially-written values; the buffers are now swapped with a fresh row.
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # The finished row becomes "the row below"; start the next from zeros.
        current_row, next_row = [0] * (cols + 1), current_row
    return largest_square_area
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): ``largest_square_area_in_matrix_bottom_up`` is not defined
    # in this file (the implementations above are all bound to
    # ``lowerCamelCase__``); this call will NameError — confirm upstream name.
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 427 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase__ ( openai_checkpoint_folder_path , openai_config_file , pytorch_dump_folder_path ):
    """Convert an OpenAI-GPT TensorFlow checkpoint to a PyTorch checkpoint.

    Builds the model config (default config when ``openai_config_file`` is
    empty), loads the TF/numpy weights into an ``OpenAIGPTModel``, then writes
    the state dict and the JSON config into ``pytorch_dump_folder_path``.

    Fixes: the previous signature declared ``a`` three times (a SyntaxError)
    and the weight-loading / ``torch.save`` calls read the undefined name
    ``a`` instead of the real arguments.
    """
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
_lowercase = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 427 | 1 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def _lowerCAmelCase ( masked_input , model , tokenizer , topk=5 ):
    """Fill the single ``<mask>`` in ``masked_input`` with CamemBERT's top-k guesses.

    Returns a list of ``(filled_sentence, probability, predicted_token)``
    tuples, ordered by model confidence.

    Fixes: the previous signature declared ``__magic_name__`` four times (a
    SyntaxError); distinct parameter names are restored from the call site
    (``fill_mask(masked_input, model, tokenizer, topk=3)``) and the garbled
    in-body references (``add_special_tokens``, ``len`` of the index tensor)
    now use the real values.
    """
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        # SentencePiece marks word starts with U+2581; map it back to a space.
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
# Demo: load CamemBERT and fill the mask in a French sentence.
# NOTE(review): the loaded tokenizer/model/sentence are bound to
# ``_lowerCamelCase`` but the following lines read ``model``, ``tokenizer``
# and ``masked_input``, and the function above is bound to ``_lowerCAmelCase``
# rather than ``fill_mask`` — these garbled names will NameError; confirm
# against the upstream example script.
_lowerCamelCase : Union[str, Any] = CamembertTokenizer.from_pretrained('camembert-base')
_lowerCamelCase : str = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
_lowerCamelCase : Any = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 121 |
import os
from math import logaa
def _lowerCAmelCase ( __magic_name__ :str = "base_exp.txt" ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) ):
UpperCAmelCase_, UpperCAmelCase_ = list(map(__magic_name__ , line.split(''',''' ) ) )
if x * logaa(__magic_name__ ) > largest:
UpperCAmelCase_ = x * logaa(__magic_name__ )
UpperCAmelCase_ = i + 1
return result
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this file (the function
    # above is bound to ``_lowerCAmelCase``); this call will NameError.
    print(solution())
| 121 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
# Module logger plus the vocabulary/config tables the tokenizer class reads.
# (The obfuscated original bound every constant to `UpperCamelCase__`, leaving
# VOCAB_FILES_NAMES etc. undefined for the class below.)
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

# Maximum input lengths per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

# Per-checkpoint init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class _lowercase(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by the `tokenizers` library.

    Uses a custom Jieba-based pre-tokenizer; because that pre-tokenizer cannot
    be pickled/serialized, it is swapped for a plain BERT pre-tokenizer during
    pickling and before `save_pretrained`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the requested casing/accent options
        # differ from what the serialized tokenizer was built with.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer is not picklable; substitute a BERT one.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Re-install the custom Jieba pre-tokenizer after unpickling.
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (pair: [CLS] A [SEP] B [SEP])."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """0s for the first segment (incl. [CLS]/[SEP]), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # Swap in a serializable pre-tokenizer before saving.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 721 | '''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module logger and vocabulary tables for the M2M100 tokenizer.
# (Originally all bound to `UpperCamelCase__`, leaving the names the class
# actually reads — VOCAB_FILES_NAMES, FAIRSEQ_LANGUAGE_CODES, ... — undefined.)
logger = logging.get_logger(__name__)

# Sentencepiece word-boundary marker.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
    "wmt21": ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de'],
}
# fmt: on
class _lowercase(PreTrainedTokenizer):
    """Sentencepiece-based M2M100 tokenizer.

    Ids come from a JSON vocab plus a SentencePiece model; language codes are
    mapped to ``__xx__`` tokens appended after the sentencepiece vocabulary and
    used as prefix tokens for translation.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        # Register every language token as an additional special token.
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        # Language tokens sit right after the sentencepiece vocabulary.
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; reload it in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            # No file on disk: serialize the in-memory model instead.
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts, src_lang="en", tgt_texts=None, tgt_lang="ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline: encode inputs and force the target language BOS."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Prefix = [src_lang_code], suffix = [eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang) -> None:
        """Prefix = [tgt_lang_code], suffix = [eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]):
    """Instantiate a SentencePiece processor and load the model file at *path*.

    The obfuscated original declared both parameters as ``_A`` (a SyntaxError)
    and was renamed away from the ``load_spm`` name the tokenizer class calls.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    """Read and parse the JSON document at *path*.

    Fixes the original, which passed the path string itself to ``json.load``
    instead of the opened file object.
    """
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """Serialize *data* as 2-space-indented JSON to *path*.

    The obfuscated original declared duplicate ``_A`` parameters (SyntaxError)
    and passed the same name for data, file handle and indent target.
    """
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 496 | 0 |
"""simple docstring"""
def _lowerCamelCase(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Project Euler 71: numerator of the largest fraction n/d strictly less
    than ``numerator/denominator`` over all d in [1, limit].

    For each denominator d, the best candidate numerator is
    ``floor(d * numerator / denominator)``, stepped down by one when the
    division is exact (the fraction must be *strictly* smaller).
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # Cross-multiplied comparison avoids floating-point error:
        # current_numerator/current_denominator > max_numerator/max_denominator
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
    # Report the numerator of the best fraction left of 3/7 for d <= 1_000_000.
    # (Original called `solution`, which this file never defines; the solver
    # above is `_lowerCamelCase`.)
    print(_lowerCamelCase(numerator=3, denominator=7, limit=1_000_000))
| 232 | """simple docstring"""
# Cipher alphabet; the Vigenère helpers below look this up as LETTERS.
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    """Interactively encrypt or decrypt a message with the Vigenère cipher.

    Restores the local names (message/key/mode/translated) and the ``main``
    name the ``__main__`` guard calls; the obfuscated original clobbered all
    of them.
    """
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')

    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)
    # NOTE(review): any other answer leaves `translated` unbound, as in the
    # original upstream script.

    print(f'\n{mode.title()}ed message:')
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    """Encrypt *message* with *key* using the Vigenère cipher."""
    return translate_message(key, message, 'encrypt')
def decrypt_message(key: str, message: str) -> str:
    """Decrypt *message* with *key* using the Vigenère cipher."""
    return translate_message(key, message, 'decrypt')
def translate_message(key: str, message: str, mode: str) -> str:
    """Vigenère-translate *message* under *key*.

    *mode* is 'encrypt' or 'decrypt'.  Characters outside the alphabet pass
    through unchanged and do not advance the key position; case of alphabetic
    characters is preserved.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index])
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return ''.join(translated)
if __name__ == "__main__":
    # Launch the interactive Vigenère encrypt/decrypt prompt.
    main()
| 232 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the REALM checkpoint -> config-URL map.
# (Originally both bound to `_UpperCamelCase`, so the second clobbered the first.)
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class a(PretrainedConfig):
    """Configuration for REALM models (embedder/encoder/scorer/reader/open-QA).

    Restores the undefined base class (`a_`), duplicate obfuscated parameter
    names, and the instance-attribute assignments (the original bound every
    value to a local `lowercase`, so the config stored nothing).
    """

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 134 |
"""simple docstring"""
from torch import nn
class a(nn.Module):
    """Single-layer classification head: projects an embedding to class logits.

    Restores the `self.` attribute assignments and the `forward` method name
    (required for `nn.Module.__call__` dispatch) that obfuscation destroyed.
    """

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
| 134 | 1 |
import mpmath # for roots of unity
import numpy as np
class lowerCamelCase:
    """Multiply two polynomials with a radix-2 Cooley-Tukey FFT.

    Coefficient index ``i`` is the coefficient of ``x**i``.  The product is
    computed once at construction time and stored in ``self.product``.
    Fixes the obfuscated original, which never assigned ``self.polyA`` and
    friends, tested ``len(which)`` instead of ``len(dft)`` in the corner case,
    and unpacked ``enumerate`` backwards in ``__str__``.
    """

    def __init__(self, poly_a=None, poly_b=None):
        import cmath  # local import: only needed for the root-of-unity constant

        # Input as coefficient lists; copy so callers' lists are not mutated.
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad to a power-of-two length that can hold every product coefficient
        # (len_A + len_B - 1 terms).
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A primitive c_max_length-th root of unity used by the transform.
        # (Equivalent to the original mpmath.root(x=1, n=c_max_length, k=1),
        # without the third-party dependency.)
        self.root = cmath.exp(2j * cmath.pi / self.c_max_length)

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Iterative decimation DFT of polyA ("A") or polyB ("B")."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case: a single coefficient is its own transform.
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two DFTs, then apply the inverse transform."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack, rounding away floating-point noise.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing zero coefficients
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests: run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 462 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf YAML config; optionally pretty-print it.

    Restores the `load_config` name called by `load_vqgan` and the clobbered
    `config` local.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a taming-transformers VQModel on *device* from config + checkpoint."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    # Lightning checkpoints nest the weights under "state_dict".
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    # NOTE(review): strict load assumed; the obfuscated source obscured the flag.
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    """Encode *x* to the VQGAN latent space and decode it back.

    NOTE(review): the encoder is assumed to return a 3-tuple whose first
    element is the quantized latent (taming's VQModel.encode convention) —
    the obfuscated source hid the unpacking targets.
    """
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``package.module.Class`` to the object itself.

    Fixes the duplicate obfuscated parameter names and the bogus
    ``package=reload`` argument the obfuscation produced.
    """
    module_name, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module_name)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module_name, package=None), cls)
def instantiate_from_config(config):
    """Instantiate ``config["target"]`` (a dotted class path) with
    ``config["params"]`` as keyword arguments (empty dict if absent)."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from *config*, optionally load weights, move to GPU
    and switch to eval mode.  Returns ``{"model": model}``."""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Load a Lightning checkpoint (if *ckpt* is given) and build the model.

    Returns ``(model, global_step)``; global_step is None when no checkpoint
    is supplied.
    """
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 462 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an original TensorFlow OpenAI GPT checkpoint to PyTorch format.

    Restores the function name the ``__main__`` block calls and the clobbered
    config/model/path locals.
    """
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point.  (The obfuscated original bound both the parser and the
    # parsed args to `A_`, so `parser.add_argument` raised NameError.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 606 | '''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def snake_case_(fn: Callable) -> Callable:
    """Decorator marking *fn* as experimental: warns on every call, then
    delegates to *fn* unchanged.

    Fixes the duplicate ``*__snake_case, **__snake_case`` parameters
    (a SyntaxError) and the undefined ``fn`` reference in the original.
    """

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
| 606 | 1 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
# Root of the repository this checker runs against.
REPO_PATH = "."

if __name__ == "__main__":
    # Validate `utils/documentation_tests.txt`: every listed path must exist
    # and the file must be sorted alphabetically.
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 595 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case_ : Tuple = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 595 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
    '''Processor tests for OwlViT: tokenizer + image-processor round-trips.

    NOTE(review): this block looks machine-transformed and is broken as
    written — every method is named ``A`` (later definitions overwrite the
    earlier ones, so only the last one survives on the class and unittest
    discovers none of them), locals are all clobbered into ``__lowercase``,
    and many statements read ``snake_case_`` / ``tokenizer_slow`` /
    ``input_texts`` / ``processor`` etc. that are never bound. Restore the
    original identifiers before relying on these tests.
    '''
    def A ( self ) -> Optional[int]:
        '''setUp: write a toy CLIP vocab/merges pair and an OwlViT image-processor config into a temp dir.'''
        __lowercase = tempfile.mkdtemp()
        # fmt: off
        __lowercase = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        __lowercase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
        __lowercase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        __lowercase = {'''unk_token''': '''<unk>'''}
        __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(snake_case_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(snake_case_ ) )
        __lowercase = {
            '''do_resize''': True,
            '''size''': 2_0,
            '''do_center_crop''': True,
            '''crop_size''': 1_8,
            '''do_normalize''': True,
            '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        __lowercase = os.path.join(self.tmpdirname , snake_case_ )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(snake_case_ , snake_case_ )
    def A ( self , **snake_case_ ) -> Union[str, Any]:
        '''Load the slow CLIP tokenizer back from the temp dir.'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **snake_case_ )
    def A ( self , **snake_case_ ) -> str:
        '''Load the fast CLIP tokenizer back from the temp dir.'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **snake_case_ )
    def A ( self , **snake_case_ ) -> str:
        '''Load the OwlViT image processor back from the temp dir.'''
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
    def A ( self ) -> Optional[Any]:
        '''tearDown: remove the temp dir created in setUp.'''
        shutil.rmtree(self.tmpdirname )
    def A ( self ) -> Optional[Any]:
        '''Create a one-element list holding a random 30x400 RGB PIL image.'''
        __lowercase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        __lowercase = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def A ( self ) -> Optional[Any]:
        '''save_pretrained/from_pretrained round-trip for both slow and fast processors.'''
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_rust_tokenizer()
        __lowercase = self.get_image_processor()
        __lowercase = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowercase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
        __lowercase = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowercase = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , snake_case_ )
        self.assertIsInstance(processor_fast.tokenizer , snake_case_ )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , snake_case_ )
        self.assertIsInstance(processor_fast.image_processor , snake_case_ )
    def A ( self ) -> Dict:
        '''from_pretrained must honour extra tokenizer / image-processor kwargs.'''
        __lowercase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowercase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowercase = self.get_image_processor(do_normalize=snake_case_ )
        __lowercase = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=snake_case_ )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , snake_case_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , snake_case_ )
    def A ( self ) -> List[str]:
        '''Processor(images=...) must match the bare image-processor output.'''
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
        __lowercase = self.prepare_image_inputs()
        __lowercase = image_processor(snake_case_ , return_tensors='''np''' )
        __lowercase = processor(images=snake_case_ , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def A ( self ) -> str:
        '''Processor(text=...) must match the bare tokenizer output.'''
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
        __lowercase = '''lower newer'''
        __lowercase = processor(text=snake_case_ , return_tensors='''np''' )
        __lowercase = tokenizer(snake_case_ , return_tensors='''np''' )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
    def A ( self ) -> Union[str, Any]:
        '''A text+image call returns input_ids, attention_mask and pixel_values; an empty call raises.'''
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
        __lowercase = '''lower newer'''
        __lowercase = self.prepare_image_inputs()
        __lowercase = processor(text=snake_case_ , images=snake_case_ )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(snake_case_ ):
            processor()
    def A ( self ) -> Optional[int]:
        '''Text-only call pads each query to the fixed sequence length (16).'''
        __lowercase = '''google/owlvit-base-patch32'''
        __lowercase = OwlViTProcessor.from_pretrained(snake_case_ )
        __lowercase = ['''cat''', '''nasa badge''']
        __lowercase = processor(text=snake_case_ )
        __lowercase = 1_6
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
        self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(snake_case_ ):
            processor()
    def A ( self ) -> List[Any]:
        '''Nested text batches are flattened to batch_size * max_queries rows.'''
        __lowercase = '''google/owlvit-base-patch32'''
        __lowercase = OwlViTProcessor.from_pretrained(snake_case_ )
        __lowercase = [['''cat''', '''nasa badge'''], ['''person''']]
        __lowercase = processor(text=snake_case_ )
        __lowercase = 1_6
        __lowercase = len(snake_case_ )
        __lowercase = max([len(snake_case_ ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
        self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(snake_case_ ):
            processor()
    def A ( self ) -> Optional[int]:
        '''Tokenization of two queries yields the expected CLIP token ids.'''
        __lowercase = '''google/owlvit-base-patch32'''
        __lowercase = OwlViTProcessor.from_pretrained(snake_case_ )
        __lowercase = ['''cat''', '''nasa badge''']
        __lowercase = processor(text=snake_case_ )
        __lowercase = 1_6
        __lowercase = inputs['''input_ids''']
        __lowercase = [
            [4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
        self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def A ( self ) -> Dict:
        '''images + query_images yields query_pixel_values and pixel_values.'''
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
        __lowercase = self.prepare_image_inputs()
        __lowercase = self.prepare_image_inputs()
        __lowercase = processor(images=snake_case_ , query_images=snake_case_ )
        self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(snake_case_ ):
            processor()
    def A ( self ) -> Union[str, Any]:
        '''batch_decode is forwarded verbatim to the underlying tokenizer.'''
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
        __lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowercase = processor.batch_decode(snake_case_ )
        __lowercase = tokenizer.batch_decode(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
| 527 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate


# Emit a deprecation warning once at import time so downstream users migrate
# to `diffusers.pipelines.pipeline_utils`; stacklevel=3 points the warning at
# the importer rather than this shim.
deprecate(
    '''pipelines_utils''',
    '''0.22.0''',
    '''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 527 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer constants. The transformed code assigned all four of
# these to the single name ``lowerCamelCase__`` while the class below reads
# ``logger``, ``VOCAB_FILES_NAMES`` etc. — restore the real names.
logger = logging.get_logger(__name__)

# Name of the vocabulary file bundled with a saved tokenizer.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

# Hub locations of pretrained vocabularies.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

# Maximum input length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class __magic_name__(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR (scene-text recognition).

    Splits a string into individual characters and maps them through a JSON
    vocabulary file. Reconstructed from the broken transformed version, which
    had duplicate ``_a`` parameters (a SyntaxError), every method named
    ``__a`` (each overwriting the previous), and reads of the unbound name
    ``snake_case__``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        """Load the vocabulary from ``vocab_file`` and build the id->token decoder."""
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping (id -> token) used when decoding.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocabulary including added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Character-level tokenization: every character is its own token."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a token to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token (None when unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary as JSON into ``save_directory`` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 122 |
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> bool:
    """Return True when every character of the input string is unique.

    Uses a Python big-int as a bitmap: bit ``ord(ch)`` is set the first time a
    character is seen; a second occurrence is detected by that bit already
    being on. The transformed version iterated the unbound name ``input_str``
    and applied ``ord``/``pow`` to the whole string — fixed here.
    """
    bitmap = 0  # one bit per Unicode code point seen so far
    for ch in lowerCamelCase_:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 105 | 0 |
"""simple docstring"""
import math
def __UpperCAmelCase ( __UpperCamelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase ( __UpperCamelCase = 0.1 ):
    """Project Euler 58: smallest odd spiral side length whose diagonal prime
    ratio drops below ``__UpperCamelCase`` (the ratio, default 0.1).

    The transformed version read the never-bound names ``primes``/``j``/
    ``ratio`` and called the unresolved ``is_prime`` — restored here with a
    local helper so this block stands on its own.
    """

    def _is_prime(number):
        # 6k +/- 1 trial-division primality test (local copy keeps the block self-contained).
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(number) + 1), 6):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True

    j = 3  # current (odd) spiral side length
    primes = 3  # primes found on the diagonals so far (3, 5, 7 for side 3)
    # A j x j spiral has 2*j - 1 diagonal entries in total.
    while primes / (2 * j - 1) >= __UpperCamelCase:
        # The four corners of the next layer are j*j + (j+1)*k for k = 1..4;
        # j*j itself is an odd square and never prime.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += _is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 709 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ ( unittest.TestCase ):
    """Unit tests for the summarization preprocessing helpers
    (truncate_or_pad, process_story, build_mask, compute_token_type_ids).

    NOTE(review): this block is machine-transformed and broken as written —
    every method is named ``_lowerCamelCase`` (later definitions overwrite the
    earlier ones and unittest discovers none of them), locals are clobbered
    into ``__lowercase``, and the assertions read ``UpperCamelCase_`` /
    ``self.block_size`` / ``expected`` that are never bound. Restore the
    original identifiers before trusting these tests.
    """
    def _lowerCamelCase ( self ) -> List[str]:
        # Presumably the original setUp: fixes the padding/truncation block size.
        __lowercase : Union[str, Any] = 10
    def _lowerCamelCase ( self ) -> str:
        # Sequences shorter than block_size should be right-padded with 0.
        __lowercase : List[str] = [1, 2, 3, 4]
        __lowercase : List[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
    def _lowerCamelCase ( self ) -> Optional[int]:
        # Sequences exactly block_size long are returned unchanged.
        __lowercase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        __lowercase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
    def _lowerCamelCase ( self ) -> int:
        # Sequences longer than block_size are truncated.
        __lowercase : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        __lowercase : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
    def _lowerCamelCase ( self ) -> List[Any]:
        # A story without an @highlight section yields an empty summary list.
        __lowercase : List[Any] = '''It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this.'''
        __lowercase ,__lowercase : Optional[Any] = process_story(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , [] )
    def _lowerCamelCase ( self ) -> Optional[Any]:
        # Empty input yields empty story and empty summary.
        __lowercase : Optional[int] = ''''''
        __lowercase ,__lowercase : Any = process_story(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , [] )
        self.assertEqual(UpperCamelCase_ , [] )
    def _lowerCamelCase ( self ) -> Dict:
        # @highlight separates the story lines from the summary lines.
        __lowercase : List[str] = (
            '''It was the year of Our Lord one thousand seven hundred and '''
            '''seventy-five\n\nSpiritual revelations were conceded to England '''
            '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
        )
        __lowercase ,__lowercase : int = process_story(UpperCamelCase_ )
        __lowercase : Union[str, Any] = [
            '''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
            '''Spiritual revelations were conceded to England at that favoured period, as at this.''',
        ]
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
        __lowercase : List[str] = ['''It was the best of times.''']
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
    def _lowerCamelCase ( self ) -> Tuple:
        # build_mask with pad id 0: no padding present -> all ones.
        __lowercase : Union[str, Any] = torch.tensor([1, 2, 3, 4] )
        __lowercase : Union[str, Any] = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
    def _lowerCamelCase ( self ) -> List[str]:
        # build_mask masks out trailing pad tokens (pad id 23).
        __lowercase : Optional[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        __lowercase : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
    def _lowerCamelCase ( self ) -> Optional[int]:
        # Pad id may also appear as a real token earlier in the sequence.
        __lowercase : Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        __lowercase : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
    def _lowerCamelCase ( self ) -> Dict:
        # Token type ids alternate segments at each separator (id 101).
        __lowercase : List[Any] = 1_01
        __lowercase : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
        __lowercase : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        __lowercase : Optional[int] = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
        np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
| 523 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
UpperCamelCase_ = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCamelCase_ = bertabert.config.encoder.vocab_size
UpperCamelCase_ = tokenizer.sep_token_id
UpperCamelCase_ = tokenizer.cls_token_id
UpperCamelCase_ = 128
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
UpperCamelCase_ = train_dataset.select(range(32 ) )
UpperCamelCase_ = val_dataset.select(range(16 ) )
UpperCamelCase_ = 4
def _map_to_encoder_decoder_inputs(_UpperCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCamelCase_ = tokenizer(batch['article'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=512 )
UpperCamelCase_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=128 )
UpperCamelCase_ = inputs.input_ids
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = outputs.input_ids
UpperCamelCase_ = outputs.input_ids.copy()
UpperCamelCase_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
UpperCamelCase_ = outputs.attention_mask
assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_UpperCAmelCase ):
UpperCamelCase_ = pred.label_ids
UpperCamelCase_ = pred.predictions
# all unnecessary tokens are removed
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
UpperCamelCase_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
UpperCamelCase_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy='steps' , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCamelCase_ = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
| 23 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab (the google.colab package
# only exists there).
# NOTE(review): the result is stored in ``_snake_case`` but the menu class
# below reads ``in_colab`` — one of the two names needs to change.
_snake_case = False
try:
    _snake_case = _is_package_available('''google.colab''')
except ModuleNotFoundError:
    pass
@input.register
class UpperCAmelCase_ :
    '''Interactive terminal bullet menu: arrow keys / digits move the cursor,
    enter selects, Ctrl-C aborts.

    NOTE(review): machine-transformed and broken as written — ``__init__`` and
    several methods declare the parameter ``__A`` twice (duplicate argument
    names are a SyntaxError; originals were ``prompt, choices`` /
    ``index, end_char`` / ``direction, num_spaces``), and the attribute
    assignments were clobbered into the local ``lowerCamelCase`` instead of
    ``self.position`` / ``self.choices`` / ``self.prompt`` /
    ``self.arrow_char``. Restore the original names before use.
    '''
    def __init__( self , __A = None , __A = [] ):
        """Store the prompt and choices; pick a platform-appropriate arrow glyph."""
        lowerCamelCase : Any = 0
        lowerCamelCase : Optional[int] = choices
        lowerCamelCase : Optional[int] = prompt
        if sys.platform == "win32":
            lowerCamelCase : Any = "*"
        else:
            lowerCamelCase : Union[str, Any] = "➔ "
    def _snake_case ( self , __A , __A = "" ):
        """Write one choice's label, highlighted (green) on non-Windows terminals."""
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , __A )
        else:
            forceWrite(self.choices[index] , __A )
    def _snake_case ( self , __A ):
        """Print one menu row, prefixing the arrow when it is the current position."""
        if index == self.position:
            forceWrite(F""" {self.arrow_char} """ )
            self.write_choice(__A )
        else:
            forceWrite(F""" {self.choices[index]}""" )
        reset_cursor()
    def _snake_case ( self , __A , __A = 1 ):
        """Move the highlight up or down by ``num_spaces`` rows and redraw both rows."""
        lowerCamelCase : Optional[Any] = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(__A )
        move_cursor(__A , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP["up"] )
    def _snake_case ( self ):
        """Key handler: arrow-up moves the highlight up one row."""
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP["down"] )
    def _snake_case ( self ):
        """Key handler: arrow-down moves the highlight down one row."""
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP["newline"] )
    def _snake_case ( self ):
        """Key handler: enter confirms — jump below the menu and return the index."""
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        return self.position
    @input.mark(KEYMAP["interrupt"] )
    def _snake_case ( self ):
        """Key handler: Ctrl-C — jump below the menu and re-raise KeyboardInterrupt."""
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(__A )] for number in range(10 )] )
    def _snake_case ( self ):
        """Key handler: digit keys jump straight to that choice index."""
        lowerCamelCase : List[Any] = int(chr(self.current_selection ) )
        lowerCamelCase : Union[str, Any] = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , __A )
            else:
                return
        else:
            return
    def _snake_case ( self , __A = 0 ):
        """Run the menu loop (plain numeric input on Colab) and return the chosen index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , "\n" )
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        lowerCamelCase : Any = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(__A )
            forceWrite("\n" )
        move_cursor(len(self.choices ) - self.position , "UP" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        lowerCamelCase : str = int(builtins.input() )
                    except ValueError:
                        lowerCamelCase : Optional[Any] = default_choice
                else:
                    lowerCamelCase : Optional[Any] = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , "UP" )
                        clear_line()
                    self.write_choice(__A , "\n" )
                    return choice
| 340 | 0 |
def lowerCamelCase_(lowerCamelCase_: int = 50) -> int:
    """Project Euler 117: count the ways to tile a row of ``lowerCamelCase_``
    unit cells with gray units plus tiles of length 2, 3 and 4.

    DP over row length: a row is either all units (the initial 1) or starts
    with some units followed by a tile, with the remainder counted recursively.
    The transformed version read the unbound names ``length`` (the parameter
    is ``lowerCamelCase_``) and ``solution`` — fixed here.
    """
    length = lowerCamelCase_
    # ways_number[k] = number of tilings of a row of length k (empty row: 1).
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # One tile of `tile_length` placed at `tile_start`, units before it,
                # remaining suffix tiled in ways_number[...] ways.
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{lowerCamelCase_() = }")
| 457 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __magic_name__ ( A__, unittest.TestCase ):
    """Tokenizer tests for CPM-Ant.

    NOTE(review): machine-transformed and broken as written — the mixin base
    ``A__`` is never defined (originally ``TokenizerTesterMixin``), the two
    class attributes are both named ``lowercase`` (the second clobbers the
    first; originals were ``tokenizer_class`` / ``test_rust_tokenizer``), and
    the methods read ``vocab_tokens`` / ``texts`` / ``normalized_text`` etc.
    that were clobbered into ``UpperCAmelCase``. Restore the original
    identifiers before running.
    """
    lowercase : Optional[Any] =CpmAntTokenizer
    lowercase : Dict =False
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
        '''setUp: write a tiny vocabulary file into the mixin-provided temp dir.'''
        super().setUp()
        UpperCAmelCase = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    @tooslow
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
        '''Round-trip tokenize/convert/decode against the released 10B checkpoint.'''
        UpperCAmelCase = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
        UpperCAmelCase = "今天天气真好!"
        UpperCAmelCase = ["今天", "天气", "真", "好", "!"]
        UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        UpperCAmelCase = "今天天气真好!"
        UpperCAmelCase = [tokenizer.bos_token] + tokens
        UpperCAmelCase = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
        UpperCAmelCase = tokenizer.decode(UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 457 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__magic_name__ = "http://www.mocksite.com/file1.txt"
__magic_name__ = "\"text\": [\"foo\", \"foo\"]"
__magic_name__ = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class SCREAMING_SNAKE_CASE:
    """Minimal stand-in for a ``requests`` response object.

    Reconstructed from the broken transformed version, whose three class
    attributes were all named ``a_`` (clobbering each other), whose method had
    lost its ``iter_content`` name, and which returned the unbound name
    ``lowercase__`` instead of the module-level ``CONTENT`` fixture.
    """

    status_code = 2_0_0
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        """Yield the whole mock payload as a single bytes chunk."""
        return [bytes(CONTENT, "utf-8")]
def _lowerCamelCase ( *UpperCAmelCase__,**UpperCAmelCase__ ) -> Optional[int]:
    '''Monkeypatch target for ``requests.request``: always answer with a MockResponse.

    NOTE(review): ``*UpperCAmelCase__`` and ``**UpperCAmelCase__`` share a
    name — duplicate argument names are a SyntaxError; the originals were
    ``*args, **kwargs``.
    '''
    return MockResponse()
@pytest.mark.parametrize('urls_type',[str, list, dict] )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> Tuple:
    '''Download via DownloadManager with str / list / dict url inputs and check
    the cache path, content and .json metadata.

    NOTE(review): machine-transformed and broken as written — the three
    parameters share the name ``UpperCAmelCase__`` (duplicate argument names
    are a SyntaxError; originals were ``monkeypatch, urls_type, tmp_path``),
    locals are clobbered into ``a__`` and the body reads the unbound name
    ``SCREAMING_SNAKE_CASE__``.
    '''
    import requests
    monkeypatch.setattr(SCREAMING_SNAKE_CASE__,'request',SCREAMING_SNAKE_CASE__ )
    a__ = URL
    if issubclass(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
        a__ = url
    elif issubclass(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
        a__ = [url]
    elif issubclass(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
        a__ = {"""train""": url}
    a__ = """dummy"""
    a__ = """downloads"""
    a__ = tmp_path
    a__ = DownloadConfig(
        cache_dir=os.path.join(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ),use_etag=SCREAMING_SNAKE_CASE__,)
    a__ = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE__,download_config=SCREAMING_SNAKE_CASE__ )
    a__ = dl_manager.download(SCREAMING_SNAKE_CASE__ )
    a__ = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
            a__ = [downloaded_paths]
            a__ = [urls]
        elif isinstance(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
            assert "train" in downloaded_paths.keys()
            a__ = downloaded_paths.values()
            a__ = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            a__ = Path(SCREAMING_SNAKE_CASE__ )
            a__ = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            a__ = downloaded_path.read_text()
            assert content == CONTENT
            a__ = downloaded_path.with_suffix('.json' )
            assert metadata_downloaded_path.exists()
            a__ = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type',[str, list, dict] )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> Optional[Any]:
    '''Extract an .xz archive via DownloadManager with str / list / dict path
    inputs and verify the extracted location and content.

    NOTE(review): machine-transformed and broken as written — the three
    parameters share the name ``UpperCAmelCase__`` (duplicate argument names
    are a SyntaxError; originals were ``paths_type, xz_file, text_file``),
    locals are clobbered into ``a__`` and the body reads the unbound name
    ``SCREAMING_SNAKE_CASE__``.
    '''
    a__ = str(SCREAMING_SNAKE_CASE__ )
    if issubclass(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
        a__ = filename
    elif issubclass(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
        a__ = [filename]
    elif issubclass(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
        a__ = {"""train""": filename}
    a__ = """dummy"""
    a__ = xz_file.parent
    a__ = """extracted"""
    a__ = DownloadConfig(
        cache_dir=SCREAMING_SNAKE_CASE__,use_etag=SCREAMING_SNAKE_CASE__,)
    a__ = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE__,download_config=SCREAMING_SNAKE_CASE__ )
    a__ = dl_manager.extract(SCREAMING_SNAKE_CASE__ )
    a__ = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
            a__ = [extracted_paths]
            a__ = [paths]
        elif isinstance(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
            assert "train" in extracted_paths.keys()
            a__ = extracted_paths.values()
            a__ = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            a__ = Path(SCREAMING_SNAKE_CASE__ )
            a__ = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(SCREAMING_SNAKE_CASE__,etag=SCREAMING_SNAKE_CASE__ )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            a__ = extracted_path.read_text()
            a__ = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> int:
    '''Assert an archived .jsonl member holds exactly 4 records with keys col_1..col_3.

    NOTE(review): both parameters are named ``UpperCAmelCase__`` (duplicate
    argument names are a SyntaxError; originals were ``path, file``), and the
    body reads the unbound name ``SCREAMING_SNAKE_CASE__``.
    '''
    assert path.endswith('.jsonl' )
    for num_items, line in enumerate(SCREAMING_SNAKE_CASE__,start=1 ):
        a__ = json.loads(line.decode('utf-8' ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('archive_jsonl',['tar_jsonl_path', 'zip_jsonl_path'] )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> Optional[Any]:
    '''iter_archive over a tar/zip of jsonl files yields both members.

    NOTE(review): both parameters are named ``UpperCAmelCase__`` (SyntaxError;
    originals were ``request, archive_jsonl``), and the body reads the unbound
    name ``SCREAMING_SNAKE_CASE__``.
    '''
    a__ = request.getfixturevalue(SCREAMING_SNAKE_CASE__ )
    a__ = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ),start=1 ):
        _test_jsonl(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ )
    assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl',['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> str:
    '''iter_archive handles an archive nested inside an archive.

    NOTE(review): both parameters are named ``UpperCAmelCase__`` (SyntaxError;
    originals were ``request, archive_nested_jsonl``), and the body reads the
    unbound name ``SCREAMING_SNAKE_CASE__``.
    '''
    a__ = request.getfixturevalue(SCREAMING_SNAKE_CASE__ )
    a__ = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ),start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ),start=1 ):
            _test_jsonl(SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ )
    assert num_tar == 1
    assert num_jsonl == 2
def _lowerCamelCase ( UpperCAmelCase__ ) -> str:
    '''iter_files over a data directory yields test.txt then train.txt.

    NOTE(review): the parameter was originally ``data_dir_with_hidden_files``;
    the body reads the unbound name ``SCREAMING_SNAKE_CASE__``.
    '''
    a__ = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(SCREAMING_SNAKE_CASE__ ),start=1 ):
        assert os.path.basename(SCREAMING_SNAKE_CASE__ ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 232 |
"""simple docstring"""
def solution(n: int = 2_000_000) -> int:
    """Project Euler 10: return the sum of all primes strictly below ``n``.

    Uses a sieve of Eratosthenes where ``primality_list[i] == 0`` means "i is prime".
    The mangled copy stepped the inner range by ``n`` instead of ``i``, which left
    almost all composites unmarked; the step is restored to ``i``. The function
    name is restored from the ``solution()`` call in the ``__main__`` guard below.
    """
    primality_list = [0 for _ in range(n + 1)]
    # 0 and 1 are not prime
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # step by i (the prime), marking its multiples composite
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):  # primes strictly below n
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
    # Self-documenting f-string (Python 3.8+). The stray `| 480 | 0 |`
    # dataset artifact line that followed was removed.
    print(F'''{solution() = }''')
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , unittest.TestCase):
    """Fast CPU tests for ``DiTPipeline`` via the shared pipeline tester mixin.

    NOTE(review): most attribute/method names in this copy were mangled
    (everything was ``__UpperCamelCase`` / ``a_``). Names referenced elsewhere in
    the class (``pipeline_class``, ``get_dummy_components``, ``get_dummy_inputs``)
    are restored from those call sites; the test-method names follow unittest
    convention — confirm against upstream.
    """

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'latents',
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the original name of this flag was lost in the mangling; it
    # disabled one optional mixin check. Left unrenamed on purpose.
    __UpperCamelCase = False

    def get_dummy_components(self):
        """Build a tiny transformer/VAE/scheduler set so the pipeline runs fast on CPU."""
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=10_00,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        return {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic minimal call kwargs for the pipeline."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

    def test_inference(self):
        """Output shape and a recorded pixel slice must match within 1e-3."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1E-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@require_torch_gpu
@slow
class snake_case ( unittest.TestCase):
    """Slow GPU integration tests for DiT against stored reference images.

    NOTE(review): all three method names here were mangled to ``a_``; they are
    restored to conventional unittest names (the first must be ``tearDown`` — it
    calls ``super().tearDown()``) so the tests are actually discovered and run.
    """

    def tearDown(self):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        """facebook/DiT-XL-2-256 must reproduce the reference images within 1e-2."""
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        class_ids = pipe.get_label_ids(words)
        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""")
            assert np.abs((expected_image - image).max()) < 1E-2

    def test_dit_512(self):
        """DiT-XL-2-512 with DPMSolverMultistepScheduler matches references (looser tol)."""
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        # the mangled copy built the scheduler but never attached it to the pipe
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        class_ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                F"""/dit/{word}_512.npy""")
            # the fused `| 712 |` dataset artifact was removed from this assert
            assert np.abs((expected_image - image).max()) < 1E-1
"""simple docstring"""
def a__(a, b) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid's algorithm).

    NOTE(review): the mangled copy had duplicate parameter names (a SyntaxError)
    and assigned to throwaway locals, looping forever; names restored from the body.
    """
    while a != 0:
        a, b = b % a, a
    return b
def a__(a, m) -> int:
    """Return the modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the extended Euclidean algorithm.

    Raises:
        ValueError: if gcd(a, m) != 1, i.e. the inverse does not exist.
    """

    def _gcd(x, y):
        # local Euclid helper; the module-level gcd's name is mangled in this
        # file, so the original `gcd(...)` call would raise NameError
        while x != 0:
            x, y = y % x, x
        return y

    if _gcd(a, m) != 1:
        msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg)
    # extended Euclidean algorithm: invariants u3 = u1*a + u2*m, v3 = v1*a + v2*m
    ua, ub, uc = 1, 0, a
    va, vb, vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va, vb, vc, ua, ub, uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class _a :
    """Wraps a learning-rate scheduler so it only steps when its optimizer(s)
    actually stepped (gradient accumulation / skipped steps aware).

    NOTE(review): the delegating method names below (all ``_UpperCAmelCase``) look
    machine-mangled — in the original they are distinct (step, get_last_lr,
    state_dict, load_state_dict, get_lr, print_lr). They are kept as-is to avoid
    changing the visible interface; in Python only the last binding survives.
    """

    def __init__( self , scheduler , optimizers , step_with_optimizer = True , split_batches = False ):
        # The mangled copy had four parameters all named `_UpperCAmelCase` (a
        # SyntaxError) and bound locals instead of attributes; attribute names
        # are restored from how the methods below read them.
        self.scheduler = scheduler
        # accept a single optimizer or a list/tuple of them
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def _UpperCAmelCase ( self , *args , **kwargs ):
        """Step the wrapped scheduler, respecting accumulation and skipped optimizer steps."""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , 'total_steps' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )

    def _UpperCAmelCase ( self ):
        """Delegate: last computed learning rate(s)."""
        return self.scheduler.get_last_lr()

    def _UpperCAmelCase ( self ):
        """Delegate: scheduler state for checkpointing."""
        return self.scheduler.state_dict()

    def _UpperCAmelCase ( self , state_dict ):
        """Delegate: restore scheduler state from ``state_dict``."""
        self.scheduler.load_state_dict(state_dict )

    def _UpperCAmelCase ( self ):
        """Delegate: current learning rate(s)."""
        return self.scheduler.get_lr()

    def _UpperCAmelCase ( self , *args , **kwargs ):
        """Delegate: print the learning rate via the wrapped scheduler."""
        return self.scheduler.print_lr(*args , **kwargs )
| 23 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester :
    """Builds tiny DeiT configs/inputs and checks the TF DeiT model classes.

    NOTE(review): the class name is restored from the ``TFDeiTModelTester(self)``
    call in the test class below; method names are restored from their call
    sites (``get_config``, ``prepare_config_and_inputs``, ``create_and_check_*``,
    ``prepare_config_and_inputs_for_common``). The mangled copy had duplicate
    parameter names (SyntaxErrors) and bound locals instead of attributes.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Random pixel_values (and labels when ``use_labels``) plus a tiny config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-API tests for the TF DeiT model classes.

    NOTE(review): the base classes are restored from this module's imports
    (``TFModelTesterMixin``, ``PipelineTesterMixin``); attribute/method names are
    restored from internal call sites and `transformers` test conventions —
    confirm the four boolean flag names against upstream.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): flag names below are best-effort reconstructions of four
    # mangled `= False` assignments — TODO confirm upstream.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # method name grounded by the `super()._prepare_for_class` delegation
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used by the integration tests.

    NOTE(review): name restored from the ``prepare_img()`` call site below; the
    mangled copy bound the image to a throwaway local and returned an undefined
    name. Returns a PIL Image (the old ``-> str`` annotation was wrong).
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test: real DeiT checkpoint, real image, exact logits.

    NOTE(review): the property name is restored from the
    ``self.default_image_processor`` read below; the test-method name follows
    `transformers` convention — confirm upstream.
    """

    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.02_66, 0.19_12, -1.28_61])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 55 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase(dataset, expected_features):
    """Assert ``dataset`` is the expected 4-row, single-'text'-column Dataset.

    NOTE(review): the mangled copy had both parameters under the same name
    (a SyntaxError); names restored from how the body uses them.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def lowerCamelCase(keep_in_memory, text_path, tmp_path):
    """Reading honors ``keep_in_memory``: Arrow memory grows only for in-memory reads."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    # NOTE(review): `_check_text_dataset` is the checker's original name; its copy
    # above in this file carries a mangled name — confirm against upstream.
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def lowerCamelCase(features, text_path, tmp_path):
    """User-supplied features override the default 'text': 'string' schema."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def lowerCamelCase(split, text_path, tmp_path):
    """The reader tags the returned dataset with the requested split ('train' by default)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    # parenthesized: the original `a == b if c else "train"` asserted a truthy
    # string (vacuously passing) whenever split was None
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def lowerCamelCase(path_type, text_path, tmp_path):
    """A single path and a list of paths must both be accepted by the reader."""
    # the mangled copy tested `issubclass(x, x)`; targets restored from the
    # parametrize values [str, list]
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def lowerCamelCase(dataset_dict, expected_features, splits=("train",)):
    """Assert every requested split of ``dataset_dict`` matches the text-file schema."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def lowerCamelCase(keep_in_memory, text_path, tmp_path):
    """DatasetDict reading honors ``keep_in_memory`` as well."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def lowerCamelCase(features, text_path, tmp_path):
    """User-supplied features override the default schema for DatasetDict reads."""
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    # (comment inherited from the CSV variant of this test — not applicable to text)
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def lowerCamelCase(split, text_path, tmp_path):
    """Explicit split reads one split; no split reads a train/test pair."""
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 706 | import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def lowerCamelCase(x):  # picklable for multiprocessing
    """Return ``x.sum()`` (e.g. of a NumPy array); module-level so it can be pickled.

    NOTE(review): the mangled copy's parameter name did not match the ``x`` used
    in the body (a NameError); restored from the body.
    """
    return x.sum()
def lowerCamelCase(i):  # picklable for multiprocessing
    """Return ``i + 1``; module-level so multiprocessing can pickle it."""
    return i + 1
@dataclass
class A:
    """Tiny record used by the ``asdict`` tests below.

    NOTE(review): the mangled copy declared this class under another name with two
    fields both named ``a__`` (the second silently replacing the first). The name
    and fields are restored from the ``A(x=..., y=...)`` call sites later in this
    file.
    """

    x: int
    y: str
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Unit tests for `datasets` py_utils helpers: map_nested, zip_dict and
    temporary_assignment.

    NOTE(review): this copy is machine-mangled — the base class name, every
    local (all ``__UpperCamelCase``, so only the last binding survives) and the
    arguments passed to the helpers (collapsed to the undefined ``__lowercase``)
    were lost. Code kept byte-for-byte; restore names from upstream before
    running.
    """

    def UpperCamelCase__ ( self) -> int:
        """map_nested over scalars/lists/dicts, sequentially, with num_proc=2,
        and the numpy map_numpy variants; finally a local lambda must fail to
        pickle under multiprocessing."""
        # original: eight distinct input structures and their expected outputs
        __UpperCamelCase :Dict = {}
        __UpperCamelCase :int = []
        __UpperCamelCase :int = 1
        __UpperCamelCase :int = [1, 2]
        __UpperCamelCase :Optional[Any] = {'''a''': 1, '''b''': 2}
        __UpperCamelCase :Dict = {'''a''': [1, 2], '''b''': [3, 4]}
        __UpperCamelCase :Optional[int] = {'''a''': {'''1''': 1}, '''b''': 2}
        __UpperCamelCase :Optional[Any] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
        __UpperCamelCase :str = {}
        __UpperCamelCase :Any = []
        __UpperCamelCase :Optional[Any] = 2
        __UpperCamelCase :Optional[int] = [2, 3]
        __UpperCamelCase :List[Any] = {'''a''': 2, '''b''': 3}
        __UpperCamelCase :List[str] = {'''a''': [2, 3], '''b''': [4, 5]}
        __UpperCamelCase :int = {'''a''': {'''1''': 2}, '''b''': 3}
        __UpperCamelCase :Union[str, Any] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
        # sequential mapping: each call pairs an input with its expected output
        self.assertEqual(map_nested(__lowercase , __lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase) , __lowercase)
        __UpperCamelCase :Optional[Any] = 2
        # same eight checks, now with num_proc=2 (multiprocessing path)
        self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase) , __lowercase)
        self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase) , __lowercase)
        # numpy inputs: map_numpy toggles whether ndarrays are mapped into
        __UpperCamelCase :Dict = {'''a''': np.eye(2), '''b''': np.zeros(3), '''c''': np.ones(2)}
        __UpperCamelCase :Optional[Any] = {'''a''': 2, '''b''': 0, '''c''': 2}
        __UpperCamelCase :Optional[int] = {
        '''a''': np.eye(2).astype(__lowercase),
        '''b''': np.zeros(3).astype(__lowercase),
        '''c''': np.ones(2).astype(__lowercase),
        }
        self.assertEqual(map_nested(__lowercase , __lowercase , map_numpy=__lowercase) , __lowercase)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(__lowercase , __lowercase , map_numpy=__lowercase).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(__lowercase , __lowercase , map_numpy=__lowercase , num_proc=__lowercase) , __lowercase)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(__lowercase , __lowercase , map_numpy=__lowercase , num_proc=__lowercase).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(__lowercase): # can't pickle a local lambda
            map_nested(lambda __lowercase: x + 1 , __lowercase , num_proc=__lowercase)

    def UpperCamelCase__ ( self) -> List[Any]:
        """zip_dict must pair values of three dicts key-wise into tuples."""
        __UpperCamelCase :List[str] = {'''a''': 1, '''b''': 2}
        __UpperCamelCase :int = {'''a''': 3, '''b''': 4}
        __UpperCamelCase :List[str] = {'''a''': 5, '''b''': 6}
        __UpperCamelCase :List[Any] = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(__lowercase , __lowercase , __lowercase)) , __lowercase)

    def UpperCamelCase__ ( self) -> List[Any]:
        """temporary_assignment must swap an attribute inside the block and
        restore it afterwards."""
        class lowerCamelCase_ :
            # NOTE(review): inner helper class was mangled too — the body reads
            # `foo.my_attr`, so this field was presumably named `my_attr` and
            # the class `Foo` — confirm upstream.
            '''simple docstring'''
            a__ : int = """bar"""
        __UpperCamelCase :List[str] = Foo()
        self.assertEqual(foo.my_attr , '''bar''')
        with temporary_assignment(__lowercase , '''my_attr''' , '''BAR'''):
            self.assertEqual(foo.my_attr , '''BAR''')
        self.assertEqual(foo.my_attr , '''bar''')
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def lowerCamelCase(iterable_length, num_proc, expected_num_proc):
    """map_nested only spawns a Pool once the iterable reaches parallel_min_length (16),
    and then caps the worker count at the iterable length."""
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"""{i}""": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class lowerCamelCase_ ( UpperCAmelCase_ ):
    """Tests for ``temp_seed``: two runs under the same seed must match, a run
    outside the context must differ — for TF, torch and plain numpy.

    NOTE(review): machine-mangled copy — the base class name, the locals (all
    ``__UpperCamelCase``) and the keyword values (collapsed to the undefined
    ``__lowercase``) are lost; the final comparisons read the likewise-mangled
    ``outa``. Code kept byte-for-byte; restore names from upstream before running.
    """

    @require_tf
    def UpperCamelCase__ ( self) -> Optional[int]:
        """temp_seed(42, set_tensorflow=True) must make TF model outputs reproducible."""
        import tensorflow as tf
        from tensorflow.keras import layers
        __UpperCamelCase :Optional[Any] = layers.Dense(2)
        def gen_random_output():
            # fresh random input each call; only the seed context makes it repeatable
            __UpperCamelCase :Optional[Any] = tf.random.uniform((1, 3))
            return model(__lowercase).numpy()
        with temp_seed(42 , set_tensorflow=__lowercase):
            __UpperCamelCase :Tuple = gen_random_output()
        with temp_seed(42 , set_tensorflow=__lowercase):
            __UpperCamelCase :int = gen_random_output()
        __UpperCamelCase :List[str] = gen_random_output()
        np.testing.assert_equal(__lowercase , __lowercase)
        self.assertGreater(np.abs(outa - outa).sum() , 0)

    @require_torch
    def UpperCamelCase__ ( self) -> List[str]:
        """temp_seed(42, set_pytorch=True) must make torch model outputs reproducible."""
        import torch
        def gen_random_output():
            __UpperCamelCase :List[Any] = torch.nn.Linear(3 , 2)
            __UpperCamelCase :Optional[Any] = torch.rand(1 , 3)
            return model(__lowercase).detach().numpy()
        with temp_seed(42 , set_pytorch=__lowercase):
            __UpperCamelCase :Union[str, Any] = gen_random_output()
        with temp_seed(42 , set_pytorch=__lowercase):
            __UpperCamelCase :List[str] = gen_random_output()
        __UpperCamelCase :Dict = gen_random_output()
        np.testing.assert_equal(__lowercase , __lowercase)
        self.assertGreater(np.abs(outa - outa).sum() , 0)

    def UpperCamelCase__ ( self) -> str:
        """temp_seed(42) must make plain numpy draws reproducible."""
        def gen_random_output():
            return np.random.rand(1 , 3)
        with temp_seed(42):
            __UpperCamelCase :List[Any] = gen_random_output()
        with temp_seed(42):
            __UpperCamelCase :Optional[Any] = gen_random_output()
        __UpperCamelCase :Optional[Any] = gen_random_output()
        np.testing.assert_equal(__lowercase , __lowercase)
        self.assertGreater(np.abs(outa - outa).sum() , 0)
@pytest.mark.parametrize("input_data", [{}])
def lowerCamelCase(input_data):
    """NestedDataStructure must expose the wrapped data unchanged via ``.data``.

    NOTE(review): the mangled copy compared against the undefined name
    ``input_data`` while the parameter had another name; restored from the body.
    """
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def lowerCamelCase(data, expected_output):
    """``flatten`` must yield the leaves of any nesting of dicts/lists in order."""
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def lowerCamelCase():
    """``asdict`` recurses through dataclasses nested in dicts/lists; a
    non-dataclass root must raise."""
    simple = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(simple) == expected_output
    nested = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(nested) == expected_output
    # the mangled copy passed an undefined name to pytest.raises; asdict raises
    # TypeError for a non-dataclass argument — TODO confirm exact exception upstream
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def lowerCamelCase(text):
    """Split ``text`` on whitespace; module-level so a Pool can pickle it.

    NOTE(review): parameter name restored from the ``text.split()`` body.
    """
    return text.split()
def lowerCamelCase(content):
    """Yield (timestamp, content) twice, ~2 seconds apart.

    Used to verify that streaming consumers receive each item as soon as it is
    yielded rather than after the generator finishes. Parameter name restored
    from the body (the mangled copy yielded an undefined ``content``).
    """
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def lowerCamelCase():
    """iflatmap_unordered must flatten results from both Pool flavors and
    stream items promptly (not buffer until the end).

    NOTE(review): the mangled copy passed an undefined name instead of ``pool``
    and appended an undefined name instead of ``content``; restored from the
    surrounding ``with ... as pool`` / loop targets.
    """
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 452 | 0 |
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeqaSeq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadVaProcessor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
UpperCamelCase_ = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {F'''funnel-transformer/{name}''': 5_12 for name in _model_names}
UpperCamelCase_ = {F'''funnel-transformer/{name}''': {"do_lower_case": True} for name in _model_names}
class _SCREAMING_SNAKE_CASE ( PreTrainedTokenizerFast ):
    r"""
    "Fast" Funnel Transformer tokenizer (backed by the HuggingFace *tokenizers*
    library), based on WordPiece. Mirrors the slow `FunnelTokenizer` API.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Funnel uses token type id 2 for the [CLS] token (unlike BERT's 0).
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        # Re-configure the backend normalizer if the saved tokenizer.json
        # disagrees with the requested lowercasing / accent / CJK handling.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Build ``[CLS] A [SEP]`` (or ``[CLS] A [SEP] B [SEP]``) input ids."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type ids; the [CLS] position gets `cls_token_type_id`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0]
        return (
            len(cls) * [self.cls_token_type_id]
            + len(token_ids_a + sep) * [0]
            + len(token_ids_a_pair + sep) * [1]
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary files to *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """Sort *array* in place with pigeonhole sort and return it.

    Works for any list of ints (including negatives); O(n + range) time.
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


# Backward-compatible alias for the original (mangled) name.
lowerCAmelCase_ = pigeon_sort

if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling random points in the unit square.

    Prints the estimate, the reference value, and the absolute error.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of *function_to_integrate*
    over [min_value, max_value]: mean sample value times interval width.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator against the exact area under y=x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    # Exact area of the triangle/trapezoid under y=x.
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2)."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: one pass per call, recursing until no swap.

    Sorts *list_data* in place and returns it; *length* limits the pass.
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # swap out-of-order neighbours
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    # After each pass the largest element of the window is in place.
    return list_data if not swapped else bubble_sort(list_data, length - 1)


# Backward-compatible alias for the original (mangled) name.
a = bubble_sort

if __name__ == "__main__":
    import doctest

    doctest.testmod()
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds tiny `BlipTextConfig`s and random inputs for TF BLIP text tests."""

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) with random contents."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Each row attends to a random-length prefix only.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        """Build a small `BlipTextConfig` from the tester's hyper-parameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Run the model with and without a mask and check output shapes."""
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` to the common-test dict format."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class snake_case_ ( TFModelTesterMixin , unittest.TestCase ):
    """Common-suite tests for the TF BLIP text model."""

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        # Training is exercised by slow/integration tests instead.
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

# Map from ONNX tensor type strings to the corresponding numpy dtypes.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    """Thin wrapper around an `onnxruntime.InferenceSession` with
    `save_pretrained` / `from_pretrained` helpers mirroring diffusers models.
    """

    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        """Run the session; keyword arguments become named input tensors."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Create an `InferenceSession` from *path*, defaulting to CPU."""
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        """Copy the model file (and external weights, if any) to *save_directory*."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        """Save the model to a directory, creating it if needed."""
        if os.path.isfile(save_directory):
            logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        """Load from a local directory or download the model file from the Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        """Public entry point; supports a trailing ``@revision`` in *model_id*."""
        revision = None
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Prepare |11> on the first two qubits and return measurement counts."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


# Backward-compatible alias for the original (mangled) name.
__magic_name__ = single_qubit_measure

if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'''Total count for various states are: {counts}''')
import math
import unittest
def __UpperCamelCase ( _A ):
assert isinstance(_A , _A ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class A ( unittest.TestCase ):
    """Unit tests for `is_prime`."""

    def test_primes(self):
        """A sample of known primes must be accepted."""
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        """Negatives raise; 0, 1 and composites are rejected."""
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class A ( ChunkPipeline ):
    """Zero-shot object-detection pipeline: scores free-text candidate labels
    against an image and returns bounding boxes (PyTorch only).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, '''vision''')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        """Accept a single image + labels, or a dict/list of such dicts."""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('''text_queries''')

        if isinstance(image, (str, Image.Image)):
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        """Split call-time kwargs into (preprocess, forward, postprocess) dicts."""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """Yield one model input per candidate label (chunked pipeline)."""
        image = load_image(inputs['''image'''])
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(''',''')

        # NOTE(review): the original dtype token was mangled; int32 assumed — confirm.
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        """Run the model on one (image, label) chunk, carrying metadata through."""
        target_size = model_inputs.pop('''target_size''')
        candidate_label = model_inputs.pop('''candidate_label''')
        is_last = model_inputs.pop('''is_last''')

        outputs = self.model(**model_inputs)

        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """Convert raw detections into sorted {score, label, box} dicts."""
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['''target_size'''])[0]

            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0])

                result = {'''score''': score, '''label''': label, '''box''': box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box):
        """Turn a 4-element box tensor into an int xmin/ymin/xmax/ymax dict."""
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a flat array into a column vector of shape (size, 1)."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Within-class scatter: sum of per-class covariance of centred samples,
    normalised by the total number of samples. *features* is (dims, samples).
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class scatter: size-weighted covariance of each class mean
    around the global mean, normalised by the total number of samples.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : Dict ) -> np.ndarray:
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(UpperCamelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(UpperCamelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase = np.linalg.eigh(UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , UpperCamelCase )
logging.info('Principal Component Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=UpperCamelCase )
logging.error('Dataset empty' )
raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project *features* onto *dimensions* discriminant directions obtained
    from the generalized eigenproblem between/within-class scatter.

    Requires dimensions < classes; raises AssertionError otherwise or on an
    all-zero dataset.
    """
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        # Orthonormalize the selected directions via SVD.
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def lowerCamelCase() -> None:
    """LDA must raise AssertionError when dimensions is not < classes."""
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions
        )
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes'
            )
    assert error_info.type is AssertionError
def lowerCamelCase() -> None:
    """PCA check against a known projection (expects an AssertionError path)."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    import doctest

    doctest.testmod()

import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the LXMERT slow and fast tokenizers."""

    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocab covering the fixtures below.
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 1_0, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = False
while is_sorted is False: # Until all the indices are traversed keep looping
__SCREAMING_SNAKE_CASE : Dict = True
for i in range(0 , len(lowercase__ ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = input_list[i + 1], input_list[i]
# swapping if elements not in order
__SCREAMING_SNAKE_CASE : Optional[int] = False
for i in range(1 , len(lowercase__ ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
__SCREAMING_SNAKE_CASE : Dict = False
return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
__lowerCAmelCase : Tuple =[int(x) for x in input().split()]
# inputing elements of the list in one line
__lowerCAmelCase : List[Any] =odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int
__SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float
__SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool
__SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 696 | 1 |
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod() | 649 |
from torch import nn
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __a : int , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__lowercase : int = class_size
__lowercase : int = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__lowercase : str = nn.Linear(__a , __a )
def lowerCAmelCase ( self : Tuple , __a : int ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.mlp(__a )
return logits | 649 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if nth_term == "":
return [""]
lowerCAmelCase__ : Union[str, Any] = int(lowerCamelCase_ )
lowerCAmelCase__ : List[str] = int(lowerCamelCase_ )
lowerCAmelCase__ : list[str] = []
for temp in range(int(lowerCamelCase_ ) ):
series.append(f'''1 / {pow(temp + 1 , int(lowerCamelCase_ ) )}''' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case = int(input("""Enter the last number (nth term) of the P-Series"""))
snake_case = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 378 |
'''simple docstring'''
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if height >= 1:
move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
move_disk(lowerCamelCase_ , lowerCamelCase_ )
move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
print("moving disk from" , lowerCamelCase_ , "to" , lowerCamelCase_ )
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = int(input("Height of hanoi: " ).strip() )
move_tower(lowerCamelCase_ , "A" , "B" , "C" )
if __name__ == "__main__":
main()
| 378 | 1 |
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
UpperCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase = logging.getLogger()
def lowercase ( ) -> Union[str, Any]:
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def lowercase ( a__ : Tuple , a__ : Dict="eval" ) -> List[Any]:
_UpperCamelCase = os.path.join(a__ , F'''{split}_results.json''' )
if os.path.exists(a__ ):
with open(a__ , '''r''' ) as f:
return json.load(a__ )
raise ValueError(F'''can\'t find {path}''' )
UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( _lowercase):
def _UpperCamelCase ( self : str ) -> int:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_flax_glue.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_clm_flax.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_summarization_flax.main()
_UpperCamelCase = get_results(__UpperCamelCase , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_mlm_flax.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_ta_mlm_flax.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 )
@slow
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_flax_ner.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_qa.main()
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 342 | """simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
snake_case__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''})
snake_case__ = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''})
snake_case__ = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''})
snake_case__ = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''})
snake_case__ = field(default=2 , metadata={'''help''': '''Batch size for training.'''})
snake_case__ = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''})
snake_case__ = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''})
snake_case__ = field(
default=1_00_00 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''})
snake_case__ = field(default=2E-4 , metadata={'''help''': '''Learning rate fo training.'''})
snake_case__ = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''})
snake_case__ = field(
default=7_50 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''})
snake_case__ = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''})
snake_case__ = field(
default=_lowercase , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''})
snake_case__ = field(default=5_00_00 , metadata={'''help''': '''Maximum number of training steps.'''})
snake_case__ = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''})
snake_case__ = field(default=10_24 , metadata={'''help''': '''Sequence lengths used for training.'''})
snake_case__ = field(default=1 , metadata={'''help''': '''Training seed.'''})
snake_case__ = field(
default=10_24 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
snake_case__ = field(
default=_lowercase , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''})
snake_case__ = field(default=_lowercase , metadata={'''help''': '''If True the data is pretokenized.'''})
@dataclass
class UpperCAmelCase_ :
snake_case__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''})
snake_case__ = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''})
snake_case__ = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''})
snake_case__ = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''})
snake_case__ = field(default=10_24 , metadata={'''help''': '''Length of sequences to be evaluated.'''})
snake_case__ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''})
@dataclass
class UpperCAmelCase_ :
snake_case__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''})
snake_case__ = field(default=_lowercase , metadata={'''help''': '''Number of workers used for code evaluation.'''})
snake_case__ = field(
default=_lowercase , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
snake_case__ = field(
default=_lowercase , metadata={'''help''': '''Sample from the language model\'s output distribution.'''})
snake_case__ = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''})
snake_case__ = field(default=2_56 , metadata={'''help''': '''Maximum number of newly generated tokens.'''})
snake_case__ = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''})
snake_case__ = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''})
snake_case__ = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''})
snake_case__ = field(
default=2_00 , metadata={'''help''': '''Number of completions to generate for each sample.'''})
snake_case__ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''})
snake_case__ = field(
default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''})
snake_case__ = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''})
snake_case__ = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class UpperCAmelCase_ :
snake_case__ = field(
default=_lowercase , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
snake_case__ = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''})
snake_case__ = field(
default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''})
snake_case__ = field(
default=10_00_00 , metadata={'''help''': '''Number of files to save per JSON output file.'''})
snake_case__ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''})
snake_case__ = field(
default=10_00 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''})
snake_case__ = field(
default=1_00 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''})
snake_case__ = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''})
snake_case__ = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''})
snake_case__ = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''})
snake_case__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
snake_case__ = field(
default=_lowercase , metadata={'''help''': '''If True, near-duplicate samples are removed.'''})
snake_case__ = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''})
@dataclass
class UpperCAmelCase_ :
snake_case__ = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''})
snake_case__ = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''})
snake_case__ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''})
snake_case__ = field(default=20_00_00 , metadata={'''help''': '''Number of examples to train tokenizer on.'''})
snake_case__ = field(
default=3_27_68 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''})
snake_case__ = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''})
snake_case__ = field(default=_lowercase , metadata={'''help''': '''Push saved tokenizer to the hub.'''})
@dataclass
class UpperCAmelCase_ :
snake_case__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''})
snake_case__ = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''})
snake_case__ = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''})
snake_case__ = field(default=_lowercase , metadata={'''help''': '''Number of workers used for code evaluation.'''})
@dataclass
class UpperCAmelCase_ :
snake_case__ = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''})
snake_case__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''})
snake_case__ = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''})
snake_case__ = field(default=_lowercase , metadata={'''help''': '''Push saved tokenizer to the hub.'''})
| 342 | 1 |
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = 1
UpperCamelCase : int = 2
while i * i <= n:
UpperCamelCase : List[Any] = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def UpperCamelCase ():
UpperCamelCase : str = 1
UpperCamelCase : Union[str, Any] = 1
while True:
i += 1
t_num += i
if count_divisors(SCREAMING_SNAKE_CASE ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 102 |
from ....utils import logging
a_ :Optional[int] = logging.get_logger(__name__)
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any], _snake_case : List[str], _snake_case : Any=None, _snake_case : Tuple=2_0_4_8 ) ->List[str]:
snake_case__ : Dict = config.__dict__
snake_case__ : Optional[Any] = modal_hidden_size
if num_labels:
snake_case__ : Union[str, Any] = num_labels
| 478 | 0 |
"""MobileViT import structure with lazy loading.

Only the configuration symbols are registered unconditionally; vision-,
torch- and TensorFlow-specific symbols are added only when the optional
dependency is installed, so ``import`` never fails on a missing extra.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Configuration objects are always importable.
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that performs imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Spectrogram Diffusion pipeline imports, gated on optional dependencies."""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy objects that raise helpful errors on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .continous_encoder import SpectrogramContEncoder
    from .notes_encoder import SpectrogramNotesEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    # MIDI support additionally requires the note_seq package.
    from .midi_utils import MidiProcessor
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Extracts log-mel spectrogram features and patch-level attention masks
    for TVLT audio inputs.

    NOTE(review): signatures and attribute names were lost in mangling and are
    restored from the upstream TVLT feature extractor — confirm against the
    original file.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        """Compute a clipped, normalized log-mel spectrogram for one waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        # Map dB values into [-1.0, 1.0].
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into padded
        ``audio_values`` (and optionally ``audio_mask``)."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 24 |
"""Sum the decimal digits of an integer three ways, with a micro-benchmark."""


def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of ``abs(n)``."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of ``abs(n)``."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum digits via string conversion (compact but slower)."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three implementations on a few large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def _UpperCAmelCase ( _UpperCamelCase : str, _UpperCamelCase : float | Decimal, _UpperCamelCase : float = 10**-10 ) -> float:
A_ = a
while True:
A_ = Decimal(_UpperCamelCase ) - (
Decimal(eval(_UpperCamelCase ) ) / Decimal(eval(str(diff(_UpperCamelCase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_UpperCamelCase ) ) < precision: # noqa: S307
return float(_UpperCamelCase )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
# Find Square Root of 5
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 174 | '''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    """Builds tiny GPT-J configs/inputs and shared cached-decoding checks for
    the Flax GPT-J tests below."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        # All special tokens map onto the last vocab id in this toy setup.
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return a small GPTJConfig plus random input ids and attention mask."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-tester interface."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        """Check that incremental decoding with a KV cache matches a full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        """Same cache consistency check, but with an explicit padded attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        # Extend the mask with zeros up to the cache length.
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    """Flax GPT-J model tests: cached decoding, generation, and PT/Flax equivalence."""

    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPTaTokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 174 | 1 |
"""Burrows–Wheeler transform (BWT) and its inverse."""
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    """Result of a BWT: the transformed string plus the index of the original
    string among the sorted rotations (needed to invert the transform)."""

    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of ``s`` (including ``s`` itself)."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows–Wheeler transform of a non-empty string ``s``."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the BWT by iteratively rebuilding and sorting the rotation table."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # Prepend the BWT column and re-sort; after len(bwt_string) rounds the
        # table holds all original rotations in sorted order.
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> "YolosConfig":
    """Build a ``YolosConfig`` matching the named checkpoint variant, with
    COCO-detection labels loaded from the Hub."""
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    # COCO object detection: 91 classes, label names fetched from the Hub.
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: "YolosConfig", base_model: bool = False) -> None:
    """Split each timm-style fused qkv projection in ``state_dict`` into
    separate query/key/value weights and biases, in place."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """Map an original YOLOS checkpoint key to its HF Transformers name."""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    # Order matters: "attn.proj" must be handled before the generic "attn".
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: "YolosForObjectDetection") -> dict:
    """Rename all checkpoint keys and split fused qkv matrices so the state
    dict matches the HF ``model`` layout."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats image used to verify model outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    """Convert an original YOLOS checkpoint to HF format, verify the outputs
    against hard-coded expected slices, then save (and optionally push)."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are never auto-closed or nagged by the
# stale bot below (matched against lower-cased label names).
# NOTE: the constant was previously assigned to a mangled name while the bot
# referenced `LABELS_TO_EXEMPT`; the name the call sites use is restored.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def main():
    """Close or nag stale GitHub issues on huggingface/transformers.

    An open issue is closed when the bot was the last commenter, the issue is
    at least 30 days old and has been inactive for more than 7 days since the
    bot's nag. Otherwise, issues at least 30 days old with more than 23 days
    of inactivity receive a stale-warning comment. Issues carrying any label
    in ``LABELS_TO_EXEMPT`` are skipped.

    Requires a ``GITHUB_TOKEN`` environment variable with repo access.
    """
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        # Newest comment first. Bug fix: the sort key previously referenced an
        # undefined name `i` instead of the lambda's own parameter, raising a
        # NameError for any issue that has at least one comment.
        comments = sorted(issue.get_comments(), key=lambda comment: comment.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state='closed')
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')


if __name__ == "__main__":
    # Bug fix: the guard previously called `main()` while the function carried
    # an obfuscated name; the function is now actually named `main`.
    main()
| 720 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
    """Output container for the prior transformer defined below.

    NOTE(review): obfuscation mangled this class. The forward pass below
    constructs ``PriorTransformerOutput(predicted_image_embedding=...)``, so
    this is presumably that output dataclass, and the ``42`` below stands in
    for what was a tensor type annotation (likely
    ``predicted_image_embedding: torch.FloatTensor``) — confirm against the
    original diffusers source before relying on it.
    """

    # NOTE(review): mangled field — see class docstring.
    _UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
    """A prior transformer (CLIP-embedding prior), apparently diffusers'
    ``PriorTransformer``: a stack of ``BasicTransformerBlock``s that maps a
    noised image embedding plus timestep (and optional text conditioning) to a
    predicted clean CLIP image embedding.

    NOTE(review): obfuscation replaced most assignment targets with
    ``_lowerCAmelCase`` and most call arguments with ``_A``, so as written the
    module cannot run (the real attribute names the methods read, e.g.
    ``self.time_proj`` / ``self.proj_in``, are never assigned). The code is
    kept token-for-token; comments below describe the evident intent.
    """

    @register_to_config
    def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
        """Build the sub-modules: timestep projection/embedding, input/output
        projections, learned positional (and optional ``prd``) embeddings, the
        transformer block stack, and a causal attention mask buffer.

        NOTE(review): all parameters were renamed to ``_A`` (duplicate names —
        a SyntaxError); judging by the defaults they were, in order, roughly:
        num_attention_heads=32, attention_head_dim=64, num_layers=20,
        embedding_dim=768, num_embeddings=77, additional_embeddings=4,
        dropout=0.0, time_embed_act_fn="silu", norm_in_type=None,
        embedding_proj_norm_type=None, encoder_hid_proj_type="linear",
        added_emb_type="prd", time_embed_dim=None, embedding_proj_dim=None,
        clip_embed_dim=None — confirm against upstream.
        """
        super().__init__()
        _lowerCAmelCase : Any = num_attention_heads
        _lowerCAmelCase : Optional[int] = attention_head_dim
        # inner_dim = heads * head_dim
        _lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
        _lowerCAmelCase : Optional[Any] = additional_embeddings
        # Optional overrides default to the main embedding/inner dims.
        _lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
        _lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
        _lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
        _lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
        _lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
        _lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
        if embedding_proj_norm_type is None:
            _lowerCAmelCase : Optional[Any] = None
        elif embedding_proj_norm_type == "layer":
            _lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
        else:
            raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
        _lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
        if encoder_hid_proj_type is None:
            _lowerCAmelCase : int = None
        elif encoder_hid_proj_type == "linear":
            _lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
        else:
            raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
        # Learned positional embedding over all token slots (embeddings + extras).
        _lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
        if added_emb_type == "prd":
            # Extra learned "prd" token appended to the sequence.
            _lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
        elif added_emb_type is None:
            _lowerCAmelCase : List[Any] = None
        else:
            raise ValueError(
                F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
        _lowerCAmelCase : List[Any] = nn.ModuleList(
            [
                BasicTransformerBlock(
                    _A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
                for d in range(_A )
            ] )
        if norm_in_type == "layer":
            _lowerCAmelCase : Any = nn.LayerNorm(_A )
        elif norm_in_type is None:
            _lowerCAmelCase : Any = None
        else:
            raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
        _lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
        _lowerCAmelCase : int = nn.Linear(_A ,_A )
        # Strictly-upper-triangular -10000.0 mask => causal attention.
        _lowerCAmelCase : Any = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
        causal_attention_mask.triu_(1 )
        _lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
        # Statistics used by post-processing to un-normalize predicted latents.
        _lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
        _lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def __lowerCamelCase ( self ):
        """Return all attention processors of the model, indexed by the
        dotted path of the sub-module that owns them."""
        _lowerCAmelCase : List[Any] = {}

        def fn_recursive_add_processors(_A ,_A ,_A ):
            # Depth-first walk: record this module's processor (if any), recurse.
            if hasattr(_A ,'set_processor' ):
                _lowerCAmelCase : str = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(_A ,_A ,_A )
        return processors

    def __lowerCamelCase ( self ,_A ):
        """Set the attention processor(s): either one processor used
        everywhere, or a dict keyed like :attr:`attn_processors` whose size
        must match the number of attention layers."""
        _lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
        if isinstance(_A ,_A ) and len(_A ) != count:
            raise ValueError(
                F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
                F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )

        def fn_recursive_attn_processor(_A ,_A ,_A ):
            if hasattr(_A ,'set_processor' ):
                if not isinstance(_A ,_A ):
                    module.set_processor(_A )
                else:
                    # Dict form: pop the processor registered for this module path.
                    module.set_processor(processor.pop(F"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )

        for name, module in self.named_children():
            fn_recursive_attn_processor(_A ,_A ,_A )

    def __lowerCamelCase ( self ):
        """Reset every attention layer to the default ``AttnProcessor``."""
        self.set_attn_processor(AttnProcessor() )

    def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
        """Forward pass: embed the timestep, project inputs, assemble the token
        sequence (text states, projected embedding, time embedding, hidden
        states, optional prd token), run the transformer stack and project the
        (last or trailing) tokens to the predicted CLIP image embedding.

        NOTE(review): mangled parameters appear to be (hidden_states,
        timestep, proj_embedding, encoder_hidden_states=None,
        attention_mask=None, return_dict=True) — confirm upstream.
        """
        _lowerCAmelCase : str = hidden_states.shape[0]
        _lowerCAmelCase : int = timestep
        # Normalize `timestep` to a 1-D tensor on the right device.
        if not torch.is_tensor(_A ):
            _lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
        elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
            _lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        _lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
        _lowerCAmelCase : Dict = self.time_proj(_A )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        _lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
        _lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
        if self.embedding_proj_norm is not None:
            _lowerCAmelCase : int = self.embedding_proj_norm(_A )
        _lowerCAmelCase : str = self.embedding_proj(_A )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            _lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
        _lowerCAmelCase : Any = self.proj_in(_A )
        _lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
        _lowerCAmelCase : List[Any] = []
        _lowerCAmelCase : Optional[Any] = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(_A )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        # Promote 2-D inputs to a length-1 sequence dimension.
        if len(proj_embeddings.shape ) == 2:
            _lowerCAmelCase : int = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            _lowerCAmelCase : Any = hidden_states[:, None, :]
        _lowerCAmelCase : int = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            _lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
            additional_embeds.append(_A )
        _lowerCAmelCase : List[str] = torch.cat(
            _A ,dim=1 ,)
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        _lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            _lowerCAmelCase : Any = F.pad(
                _A ,(
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) ,value=0.0 ,)
        _lowerCAmelCase : int = hidden_states + positional_embeddings
        if attention_mask is not None:
            # Convert 0/1 mask to additive -10000.0 form, pad for the extra
            # tokens, combine with the causal mask, and expand per head.
            _lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
            _lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
            _lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            _lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
        if self.norm_in is not None:
            _lowerCAmelCase : Any = self.norm_in(_A )
        for block in self.transformer_blocks:
            _lowerCAmelCase : int = block(_A ,attention_mask=_A )
        _lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
        # With a prd token the prediction is read from the last position;
        # otherwise from everything after the additional embeddings.
        if self.prd_embedding is not None:
            _lowerCAmelCase : Optional[int] = hidden_states[:, -1]
        else:
            _lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
        _lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=_A )

    def __lowerCamelCase ( self ,_A ):
        """Un-normalize predicted latents using the stored CLIP mean/std."""
        _lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 16 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[str] = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules a model executes during one forward pass.

    A forward hook is registered on every submodule of ``module``; running an
    input through collects, in execution order, the modules that have no
    children (plus ``nn.Conv2d`` / ``nn.BatchNorm2d``), then removes the hooks.

    Fixes vs. the mangled original: the class/field/parameter names are
    restored (the methods already read ``self.module``/``self.traced``/
    ``self.handles`` and the file calls ``Tracker(...)``), the duplicate
    hook parameters (a SyntaxError) are renamed, and the nonexistent
    ``nn.Convad`` / ``nn.BatchNormad`` become ``nn.Conv2d`` / ``nn.BatchNorm2d``.
    """

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # A module is a "leaf" when modules() yields only itself; conv and
        # batch-norm layers are recorded explicitly as well.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Detach every hook so later forward passes are not traced again.
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies weights from ``src`` into ``dest`` by tracing both models on the
    same input and aligning their parametrized operations one-to-one.

    Names are restored from the mangled original: the methods already read
    ``self.src``/``self.dest``/``self.src_skip``/``self.dest_skip``/
    ``self.raise_if_mismatch``/``self.verbose`` and the file instantiates
    ``ModuleTransfer(...)``, and the undefined default factory is ``list``.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfer weights layer-by-layer; ``x`` must be accepted by both
        models. Raises if the traced operation counts differ (unless
        ``raise_if_mismatch`` is False)."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        # Drop the layer types the caller asked to ignore on either side.
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a classy-vision RegNet to mimic how vissl runs the trunk
    (stem + per-stage feature blocks) without a classifier head.

    Fixes vs. the mangled original: ``self._feature_blocks`` is actually
    assigned (the forward already reads it), the stage index is derived from
    the number of collected blocks rather than from the model argument, and
    the forward method gets its conventional ``nn.Module`` name.
    """

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            # vissl names the stages res2..res5 ("res1" being the stem).
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """Maps an HF model name to a factory returning ``(source_model,
    state_dict_or_None)``. Names not stored explicitly fall back to timm.

    Fixes vs. the mangled original: the base class is ``dict`` (it was an
    undefined name) and the class/method names used by the rest of the file
    (``self.convert_name_to_timm``) are restored; ``pretrained`` is True for
    the timm fallback.
    """

    def convert_name_to_timm(self, x: str) -> str:
        # "regnet-y-040" -> "regnety_040"
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            # Lazily build the pretrained timm model; no separate state dict.
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """Maps a model name to the HF class used to instantiate it: backbone-only
    ``RegNetModel`` for SEER checkpoints without an ImageNet head, otherwise
    ``RegNetForImageClassification``.

    Fixes vs. the mangled original: the base class is ``dict`` (it was an
    undefined name) and the class name the file uses is restored.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """Copy selected tensors from ``from_state_dict`` into ``to_state_dict``.

    Bug fixes vs. the mangled original: the parameters had duplicate names
    (a SyntaxError), and each cloned tensor was assigned to a throwaway local
    instead of ``to_state_dict[to_key]``, so nothing was ever copied. The
    function name the caller uses is restored.

    Returns ``to_state_dict`` (mutated in place).
    """
    for from_key, to_key in keys:
        # .clone() detaches the copy from the source storage.
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], "nn.Module"],
    our_model_func: Callable[[], "nn.Module"],
    config: "RegNetConfig",
    save_directory: Path,
    push_to_hub: bool = True,
):
    """Convert one RegNet checkpoint to the HF format and optionally push it.

    Traces both the source and the HF model on a random input to transfer the
    weights, copies the SEER in1k classifier head by hand when needed, checks
    the outputs match, then pushes model and image processor to the hub.

    NOTE(review): reconstructed from a version whose parameters and locals
    were all mangled to the same name; parameter names, the manual-head key
    list and the push calls follow the surviving literals and call sites —
    confirm details (e.g. ``raise_if_mismatch``) against the upstream script.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # SEER checkpoints carry head ops the HF backbone lacks, so a strict
        # operation-count match cannot be enforced here.
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 2_24, 2_24))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True,
        )

        size = 2_24 if "seer" not in name else 3_84
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True,
        )

        print(f"Pushed {name}")
def UpperCamelCase_ ( snake_case_ : Path , snake_case_ : str = None , snake_case_ : bool = True ) -> List[str]:
    """Build the RegNet name->config / name->source-model tables and convert
    either one named checkpoint or every known one.

    NOTE(review): heavily mangled. The parameters carry duplicate names (a
    SyntaxError as written) and every local is assigned to ``__lowerCAmelCase``
    while later expressions read the real names (``num_labels``, ``idalabel``,
    ``names_to_config`` via the partial calls, ...), so the function cannot
    run in this form. The map entries below also appear to have lost their
    ``names_to_from_model_map[...] = ...`` targets — the results of the
    ``partial(...)`` calls are currently discarded. Code kept as-is.
    """
    __lowerCAmelCase = """imagenet-1k-id2label.json"""
    __lowerCAmelCase = 10_00
    __lowerCAmelCase = (1, num_labels)
    __lowerCAmelCase = """huggingface/label-files"""
    __lowerCAmelCase = num_labels
    # Download the ImageNet-1k id->label mapping from the hub.
    __lowerCAmelCase = json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type="""dataset""" ) ) , """r""" ) )
    __lowerCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()}
    __lowerCAmelCase = idalabel
    __lowerCAmelCase = {v: k for k, v in idalabel.items()}
    # Partial that pre-binds the label metadata onto RegNetConfig.
    __lowerCAmelCase = partial(snake_case_ , num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ )
    # Architecture table: depths / widths / group widths per checkpoint name.
    __lowerCAmelCase = {
        """regnet-x-002""": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type="""x""" ),
        """regnet-x-004""": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type="""x""" ),
        """regnet-x-006""": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type="""x""" ),
        """regnet-x-008""": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type="""x""" ),
        """regnet-x-016""": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type="""x""" ),
        """regnet-x-032""": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type="""x""" ),
        """regnet-x-040""": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type="""x""" ),
        """regnet-x-064""": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type="""x""" ),
        """regnet-x-080""": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type="""x""" ),
        """regnet-x-120""": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type="""x""" ),
        """regnet-x-160""": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type="""x""" ),
        """regnet-x-320""": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type="""x""" ),
        # y variant
        """regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
        """regnet-y-004""": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
        """regnet-y-006""": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
        """regnet-y-008""": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
        """regnet-y-016""": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
        """regnet-y-032""": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
        """regnet-y-040""": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
        """regnet-y-064""": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
        """regnet-y-080""": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
        """regnet-y-120""": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
        """regnet-y-160""": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
        """regnet-y-320""": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        """regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
        """regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
        """regnet-y-1280-seer""": RegNetConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
        """regnet-y-2560-seer""": RegNetConfig(
            depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
        """regnet-y-10b-seer""": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
        # finetuned on imagenet
        """regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
        """regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
        """regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
        """regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
        """regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
    }
    __lowerCAmelCase = NameToOurModelFuncMap()
    __lowerCAmelCase = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(snake_case_ : str , snake_case_ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
        # Download the vissl checkpoint, load the trunk weights into the
        # freshly built model and return the (untouched) head state dict too.
        __lowerCAmelCase = torch.hub.load_state_dict_from_url(snake_case_ , model_dir=str(snake_case_ ) , map_location="""cpu""" )
        __lowerCAmelCase = model_func()
        # check if we have a head, if yes add it
        __lowerCAmelCase = files["""classy_state_dict"""]["""base_model"""]["""model"""]
        __lowerCAmelCase = model_state_dict["""trunk"""]
        model.load_state_dict(snake_case_ )
        return model.eval(), model_state_dict["heads"]
    # pretrained
    # NOTE(review): each of the following entries presumably populated
    # ``names_to_from_model_map["regnet-y-*-seer(-in1k)"]``; the assignment
    # targets were lost in obfuscation.
    __lowerCAmelCase = partial(
        snake_case_ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    __lowerCAmelCase = partial(
        snake_case_ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    __lowerCAmelCase = partial(
        snake_case_ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
    # NOTE(review): ``w_a`` is passed twice below (a SyntaxError); upstream
    # this was presumably ``w_a=1744, w_0=620.83`` — confirm before running.
    __lowerCAmelCase = partial(
        snake_case_ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
    # IN1K finetuned
    __lowerCAmelCase = partial(
        snake_case_ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    __lowerCAmelCase = partial(
        snake_case_ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    __lowerCAmelCase = partial(
        snake_case_ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
    __lowerCAmelCase = partial(
        snake_case_ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
    # Convert a single named checkpoint, or all of them when no name is given.
    if model_name:
        convert_weight_and_push(
            snake_case_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , snake_case_ , snake_case_ , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                snake_case_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , snake_case_ , snake_case_ , snake_case_ , )
    return config, expected_shape
if __name__ == "__main__":
    # NOTE(review): the parser, the parsed args and the dump path were
    # assigned to mangled names (`_A`) while later lines referenced `parser`,
    # `args` and `pytorch_dump_folder_path`; the names are restored so the
    # script actually runs.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help=(
            'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
            ' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=Path,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        default=True,
        type=bool,
        required=False,
        help='If True, push model and image processor to the hub.',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 427 | '''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy import structure for the LUKE package: maps each submodule to the
# public names it exports. Restored from the mangled version, which assigned
# the dict and the torch-only class list to unrelated throwaway names and then
# referenced `_import_structure` (undefined) and discarded the _LazyModule.
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

# The modeling classes require torch; expose them only when it is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for type checkers only; at runtime the module stays lazy.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 427 | 1 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _snake_case(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of ``function`` via the (modified) Newton-Raphson method.

    ``function`` is a sympy-parsable expression string in ``variable``;
    iteration starts at ``starting_point`` and stops once consecutive guesses
    differ by less than ``precision``. ``multiplicity`` accelerates
    convergence for roots of known multiplicity.

    Bug fix: all five parameters previously shared the same mangled name
    (a SyntaxError); the names are restored from the obvious usage.

    Raises ZeroDivisionError when the derivative vanishes at a guess.
    """
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    # Symbolic derivative, compiled to a fast numeric callable.
    diff_function = lambdify(symbol, diff(function, symbol))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# The demo block below (and external callers) use the conventional name.
newton_raphson = _snake_case
# Let's Execute
if __name__ == "__main__":
    # NOTE(review): these demos call ``newton_raphson``; the function above is
    # defined under an obfuscated name — confirm a module-level alias with
    # this name exists before running the script.
    # Find root of trigonometric function
    # Find value of pi
    print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
    )
    # Find root of cos(x)
    print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 460 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _UpperCAmelCase ( BaseOutput ):
    """Output of the scheduler's step function.

    Restored per the "Copied from DDPMSchedulerOutput" marker above: the
    mangled version inherited from an undefined name (now ``BaseOutput``,
    imported at the top of the file) and had lost its field annotations.
    ``prev_sample`` is the sample for the next denoising step;
    ``pred_original_sample`` is the model's prediction of the fully denoised
    sample, when available.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def _snake_case ( A_ : int , A_ : List[str]=0.999 , A_ : List[Any]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(A_ : List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A_ : List[str] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
a_ : int = []
for i in range(A_ ):
a_ : Optional[Any] = i / num_diffusion_timesteps
a_ : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A_ ) / alpha_bar_fn(A_ ) , A_ ) )
return torch.tensor(A_ , dtype=torch.floataa )
class _UpperCAmelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
    """
    DDPM-style ancestral-sampling scheduler as used by unCLIP / karlo.

    NOTE(review): the obfuscated original bound every computed value to the
    same throwaway name and gave all five methods one shared name, so only
    the last method survived and every attribute read (`self.betas`,
    `self.alphas_cumprod`, `self._get_variance`, ...) would fail.  The
    bindings and method names below are restored from the names the body
    itself reads.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: float = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample, timestep=None):
        """Identity: unCLIP does not rescale the denoising model input."""
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        """Set the evenly spaced discrete timesteps used for the diffusion chain."""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        # `np.intaa` in the original is not a real dtype; int64 is what
        # torch.from_numpy expects for index tensors.
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        """Return the posterior variance (or its learned interpolation) at timestep `t`."""
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get
        # previous sample: x_{t-1} ~ N(pred_prev_sample, variance)
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output,
        timestep,
        sample,
        prev_timestep=None,
        generator=None,
        return_dict=True,
    ):
        """Predict the sample at the previous timestep by reversing the SDE."""
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                # _get_variance already returned the (log-space) std in this mode
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples,
        noise,
        timesteps,
    ):
        """Diffuse `original_samples` forward to the given `timesteps` (q(x_t | x_0))."""
        # Make sure alphas_cumprod and timestep have same device/dtype as the samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 460 | 1 |
"""Lazy import structure for the RAG model family (config, retriever, tokenizer, PT/TF models)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Submodules importable without any optional backend installed.
# NOTE(review): the original bound this dict (and every later addition) to a
# throwaway name while `_LazyModule` below reads `_import_structure`.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Install the lazy proxy so submodules are imported on first attribute
    # access; the original assigned it to a dead name, leaving the module
    # eager and `_import_structure` unused.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 508 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    """
    Builds small ConvNext configs/inputs and runs shape checks for the test
    class below.

    NOTE(review): renamed from the obfuscated `_snake_case` — both test
    classes below instantiate `ConvNextModelTester(self)`.  The original also
    dropped every `self.` prefix in `__init__` and gave all methods the same
    name; names are restored from the call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny ConvNext."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Unit tests for ConvNext driven by the common model-tester mixins.

    NOTE(review): the obfuscated original bound all five boolean class flags
    and all test methods to single shared names, so only the last of each
    survived, and the mixin bases were the undefined `a_`.  Names are
    restored to the mixin/unittest contract (`all_model_classes`,
    `pipeline_model_mapping`, `setUp`, `test_*`).
    """

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ConvNext does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ConvNext does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='ConvNext does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test.

    NOTE(review): renamed from the obfuscated `snake_case` — the integration
    test below calls `prepare_img()`.  The original `-> List[str]` annotation
    was wrong (the function returns a PIL image) and is dropped.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained facebook/convnext-tiny-224 checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Name grounded by the `self.default_image_processor` read below.
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """Backbone-specific tests for ConvNext, driven by BackboneTesterMixin.

    NOTE(review): the original bound all three class attributes to the same
    obfuscated names; `all_model_classes` / `config_class` / `has_attentions`
    are the attributes the backbone mixin reads.
    """

    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 284 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): the original bound both the logger and this URL map to the
# same throwaway name `A_`, so the logger was immediately clobbered.  The
# map name follows the transformers `*_PRETRAINED_CONFIG_ARCHIVE_MAP`
# convention — confirm against the package's __init__.
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """
    Configuration for UMT5 models.

    NOTE(review): restored from a destructively obfuscated original whose
    `__init__` repeated the parameter name `a_` (a SyntaxError), bound the
    attributes to throwaway locals, and assigned both class attributes to the
    same name.  Parameter names/defaults follow the values visible in the
    signature and the body's own reads; the base class is the otherwise
    unused `PretrainedConfig` import.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("""-""")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'"""
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    """
    ONNX export configuration for UMT5.

    NOTE(review): property names and dict-entry assignments restored — the
    original bound every entry to the same throwaway local, so the returned
    mapping only ever contained the two base entries.  The base class is the
    otherwise unused `OnnxSeqaSeqConfigWithPast` import.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 718 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""
    Wraps a TVLT image processor and a TVLT feature extractor into one
    processor.

    NOTE(review): the obfuscated original assigned all three class attributes
    to one name and never bound `self.image_processor` /
    `self.feature_extractor` (read by `__call__`), nor the per-modality
    result dicts it merged.  The base class is the otherwise unused
    `ProcessorMixin` import.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """
        Forward `images`/`images_mixed` to the image processor and `audio` to
        the feature extractor, merging all outputs into a single dict.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 28 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq key fragment -> transformers module path; "*" is filled with the
# encoder layer index.  (The original bound all three of these to the same
# throwaway name `_A`, while the functions below read `logger`, `MAPPING`
# and `TOP_LEVEL_KEYS`.)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# Mapped targets that live at the top level of the HF model (no "wav2vec2." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]
def read_txt_into_dict(filename):
    """Read a label file: maps each non-empty line's number to its first token.

    Renamed from the obfuscated `lowercase_` — the conversion entry point
    calls `read_txt_into_dict`.  The original also never bound `result`,
    `line`, `words`, `key` or `value`, which the body reads.
    """
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk the dotted `key` into `hf_pointer` and assign `value` to the target tensor.

    Adapter parameters (suffix match in PARAM_MAPPING) are rerouted via
    `hf_param_name`.  The obfuscated original bound every intermediate to a
    dead name and dropped the `.data` assignments entirely.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Store `value` in `hf_dict` under the fully-qualified HF key for `full_name`.

    NOTE(review): parameter order reconstructed — the obfuscated call site
    passes five positionals, none named; confirm against the upstream
    conversion script.  The original never bound `full_key` nor wrote the
    dict entry it computed.
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    # lm_head keeps its full tensor; everything else drops the leading dim.
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
# Adapter parameter-name fragments mapped to their transformers equivalents;
# consumed via `full_name` suffix matching.  (The original bound this to the
# throwaway `_A` while the functions above read `PARAM_MAPPING`.)
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    """Try to route one fairseq tensor into the HF model (or `hf_dict`).

    Returns True when `name` matched an entry of MAPPING.  The obfuscated
    original left `is_used`, `mapped_key`, `layer_index` and `weight_type`
    unbound and flattened the early `return` out of the match branch,
    leaving two consecutive `return is_used` statements.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                # the encoder layer index sits just before the matched fragment
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor of the fairseq state dict into `hf_model`, logging leftovers.

    NOTE(review): function and parameter names reconstructed by convention —
    the caller is truncated in this view; `is_headless` is unused in the
    visible body, confirm against the entry point.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Route one fairseq conv-feature-extractor tensor into the HF feature extractor.

    type_id 0 = conv weight/bias; type_id 2 = layer/group norm (group norm only
    on layer 0).  The obfuscated original bound the layer ids and dropped the
    `.data` assignments to dead names.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Convert a fairseq wav2vec2 checkpoint into the HF format and save it.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the converted model/processor.
        config_path: optional HF config.json to start from.
        dict_path: fairseq dictionary (or label txt for sequence classification).
        is_finetuned: True for CTC fine-tuned checkpoints.
        is_seq_class: True for sequence-classification fine-tuned checkpoints.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path)
        config.id2label = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            # layer-norm feature extractors were trained with attention masks
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )

        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""" )
        task = fairseq.tasks.setup_task(task_arg )
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )

    model = model[0].eval()

    recursively_load_weights(model , hf_wavavec , not is_finetuned )

    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and dispatch.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    parser.add_argument(
        """--is_seq_class""",
        action="""store_true""",
        help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
    )
    args = parser.parse_args()

    # a checkpoint is "fine-tuned" (CTC) unless explicitly flagged otherwise
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 299 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_A = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class _lowerCamelCase ( a_ ):
    # SAM-style automatic mask-generation ChunkPipeline: preprocess yields batches
    # of point prompts per image, the forward step predicts masks per batch, and
    # postprocess merges and filters all predicted masks.
    # NOTE(review): throughout this class many assignments bind to the throwaway
    # name `lowerCAmelCase__` while later code reads meaningful names
    # (`preprocess_kwargs`, `model_inputs`, `grid_points`, ...). This looks like
    # mechanical renaming damage — the intended targets need to be restored
    # against the upstream pipeline source before this class can run.
    def __init__( self : str , **UpperCamelCase : List[Any] ) -> Tuple:
        """Check required backends (vision + torch) and validate the model type."""
        super().__init__(**UpperCamelCase )
        requires_backends(self , """vision""" )
        requires_backends(self , """torch""" )
        # this pipeline only supports the PyTorch backend
        if self.framework != "pt":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        # NOTE(review): `UpperCamelCase` here is the kwargs dict — presumably the
        # model-mapping constant was intended; confirm upstream.
        self.check_model_type(UpperCamelCase )
    def _lowerCAmelCase ( self : str , **UpperCamelCase : Dict ) -> int:
        """Split pipeline kwargs into (preprocess, forward, postprocess) dicts.

        NOTE(review): each recognized kwarg is read into a clobbered local, so
        the three dicts returned below would stay empty, and `kwargs` itself is
        unbound (the parameter is named `UpperCamelCase`) — broken as written.
        """
        lowerCAmelCase__ : List[str] = {}
        lowerCAmelCase__ : List[str] = {}
        lowerCAmelCase__ : Any = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            lowerCAmelCase__ : List[Any] = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            lowerCAmelCase__ : int = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            lowerCAmelCase__ : Any = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            lowerCAmelCase__ : Optional[int] = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            lowerCAmelCase__ : Union[str, Any] = kwargs["""crop_n_points_downscale_factor"""]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            lowerCAmelCase__ : Optional[Any] = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            lowerCAmelCase__ : Dict = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            lowerCAmelCase__ : List[Any] = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            lowerCAmelCase__ : str = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            lowerCAmelCase__ : Optional[Any] = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            lowerCAmelCase__ : Optional[int] = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            lowerCAmelCase__ : List[Any] = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self : Tuple , UpperCamelCase : Dict , *UpperCamelCase : Optional[Any] , UpperCamelCase : str=None , UpperCamelCase : List[Any]=None , **UpperCamelCase : Tuple ) -> List[str]:
        """Run the pipeline on an image; delegates to ChunkPipeline.__call__."""
        return super().__call__(UpperCamelCase , *UpperCamelCase , num_workers=UpperCamelCase , batch_size=UpperCamelCase , **UpperCamelCase )
    def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : List[str]=64 , UpperCamelCase : int = 0 , UpperCamelCase : float = 5_12 / 15_00 , UpperCamelCase : Optional[int] = 32 , UpperCamelCase : Optional[int] = 1 , ) -> Union[str, Any]:
        """Generator preprocessing step: load the image, compute image
        embeddings once, then yield the point grid in batches.

        NOTE(review): locals (`image`, `model_inputs`, `grid_points`,
        `input_labels`, ...) are clobbered here too — the yielded names are
        unbound as written.
        """
        lowerCAmelCase__ : List[Any] = load_image(UpperCamelCase )
        lowerCAmelCase__ : Optional[int] = self.image_processor.size["""longest_edge"""]
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = self.image_processor.generate_crop_boxes(
            UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
        lowerCAmelCase__ : Optional[int] = self.image_processor(images=UpperCamelCase , return_tensors="""pt""" )
        # compute the (expensive) image embeddings a single time up front so the
        # per-batch forward passes only run the mask decoder
        with self.device_placement():
            if self.framework == "pt":
                lowerCAmelCase__ : Optional[Any] = self.get_inference_context()
                with inference_context():
                    lowerCAmelCase__ : str = self._ensure_tensor_on_device(UpperCamelCase , device=self.device )
                    lowerCAmelCase__ : Optional[Any] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
                    lowerCAmelCase__ : Optional[Any] = image_embeddings
        lowerCAmelCase__ : Tuple = grid_points.shape[1]
        lowerCAmelCase__ : Any = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""" )
        # yield one chunk of point prompts at a time; `is_last` lets the
        # postprocess step know when the final batch arrives
        for i in range(0 , UpperCamelCase , UpperCamelCase ):
            lowerCAmelCase__ : List[Any] = grid_points[:, i : i + points_per_batch, :, :]
            lowerCAmelCase__ : Union[str, Any] = input_labels[:, i : i + points_per_batch]
            lowerCAmelCase__ : Union[str, Any] = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[Any]=0.88 , UpperCamelCase : List[Any]=0.95 , UpperCamelCase : Dict=0 , UpperCamelCase : List[str]=1 , ) -> int:
        """Forward step for one batch of point prompts: run the model, then
        resize/threshold and filter the predicted masks on-device.

        NOTE(review): same clobbered-local damage as above (`input_boxes`,
        `is_last`, `model_outputs`, `masks`, ... are read but never bound).
        """
        lowerCAmelCase__ : List[Any] = model_inputs.pop("""input_boxes""" )
        lowerCAmelCase__ : Union[str, Any] = model_inputs.pop("""is_last""" )
        lowerCAmelCase__ : Dict = model_inputs.pop("""original_sizes""" ).tolist()
        lowerCAmelCase__ : Union[str, Any] = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
        lowerCAmelCase__ : Dict = self.model(**UpperCamelCase )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        lowerCAmelCase__ : Dict = model_outputs["""pred_masks"""]
        lowerCAmelCase__ : Tuple = self.image_processor.post_process_masks(
            UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , binarize=UpperCamelCase )
        lowerCAmelCase__ : Dict = model_outputs["""iou_scores"""]
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict=False , UpperCamelCase : Tuple=False , UpperCamelCase : Union[str, Any]=0.7 , ) -> Optional[Any]:
        """Merge the per-batch outputs: concatenate scores/masks/boxes, run
        NMS-style deduplication, and assemble the final result dict.

        NOTE(review): accumulator names (`all_scores`, `all_masks`,
        `all_boxes`) and merged outputs are never bound — clobbered locals.
        """
        lowerCAmelCase__ : str = []
        lowerCAmelCase__ : Tuple = []
        lowerCAmelCase__ : Optional[Any] = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("""iou_scores""" ) )
            all_masks.extend(model_output.pop("""masks""" ) )
            all_boxes.append(model_output.pop("""boxes""" ) )
        lowerCAmelCase__ : List[Any] = torch.cat(UpperCamelCase )
        lowerCAmelCase__ : Union[str, Any] = torch.cat(UpperCamelCase )
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.image_processor.post_process_for_mask_generation(
            UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
        # collect any extra per-batch outputs under their original keys
        lowerCAmelCase__ : int = defaultdict(UpperCamelCase )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(UpperCamelCase )
        lowerCAmelCase__ : Tuple = {}
        if output_rle_mask:
            lowerCAmelCase__ : Optional[int] = rle_mask
        if output_bboxes_mask:
            lowerCAmelCase__ : Tuple = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 299 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    """Builds a small DistilBert config plus random inputs for the Flax model tests.

    The name matters: the test class below instantiates ``FlaxDistilBertModelTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        # store every knob on the instance so the prepare_* helpers can read them
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) for a tiny DistilBert."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests for all DistilBert head variants."""

    # the head classes exercised by the common mixin; the previous list
    # contained FlaxDistilBertForQuestionAnswering twice
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        """Wire up the shared model tester used by the mixin's tests."""
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading each head from the hub and running a 1x1 input."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""distilbert-base-uncased""" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    """Integration test checking real output values of the base Flax model."""

    @slow
    def test_inference_no_head(self):
        """Run distilbert-base-uncased on a fixed sequence and compare a slice."""
        model = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )

        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        # reference values recorded from a known-good run of the model
        expected_slice = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 53 |
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime.

    Negatives, 0 and 1 are not prime. Uses the 6k +/- 1 optimization:
    every prime > 3 is of that form, so trial division only needs to
    test i and i + 2 for i = 5, 11, 17, ... up to sqrt(number).
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler problem 7).

    Counts 2 and 3 via the first loop, then steps through odd
    candidates only, testing each with ``is_prime``.
    """
    count = 0
    number = 1
    # handle 2 and 3 (the only primes below 5) with unit steps
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    # from here on every prime is odd, so step by 2
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
    # Print the 10001st prime; the `=` in the f-string echoes the expression too.
    print(F'''{solution() = }''')
| 53 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count integer right triangles per perimeter (Project Euler 39 helper).

    Returns a Counter mapping each perimeter p <= max_perimeter to the number
    of right triangles (base <= perpendicular) with integer sides and that
    perimeter.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        # start at `base` so each unordered leg pair is counted once
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter with the most right-triangle
    solutions (Project Euler problem 39)."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
    # Project Euler 39: perimeter with the maximum number of integer solutions.
    print(F'Perimeter {solution()} has maximum solutions')
| 679 | import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Markers wrapped around lines that need manual attention after conversion.
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'

# Expressions that have no automatic translation and must be reviewed by hand.
TO_HIGHLIGHT = [
    'TextEncoderConfig',
    'ByteTextEncoder',
    'SubwordTextEncoder',
    'encoder_config',
    'maybe_build_from_corpus',
    'manual_dir',
]

# Regex rewrites applied to each line when converting a tfds script.
TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'tfds\.core', R'datasets'),
    (R'tf\.io\.gfile\.GFile', R'open'),
    (R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
    (R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
    (R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
    (R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
    (R'tfds\.features\.FeaturesDict\(', R'dict('),
    (R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
    (R'tfds\.', R'datasets.'),
    (R'dl_manager\.manual_dir', R'self.config.data_dir'),
    (R'self\.builder_config', R'self.config'),
]
def A(args):
    """Factory for the ``datasets-cli convert`` sub-command.

    argparse invokes this (registered via ``set_defaults(func=...)``) with the
    parsed namespace; it builds the command from the two CLI path arguments.
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class lowercase__ ( UpperCamelCase_):
    # CLI command that converts TensorFlow Datasets dataset scripts into
    # HuggingFace Datasets scripts by regex-rewriting each source file.
    # NOTE(review): many locals below are bound to the throwaway name
    # `SCREAMING_SNAKE_CASE` while later lines read meaningful names
    # (`train_parser`, `out_line`, `output_dir`, ...). This looks like
    # mechanical renaming damage and needs restoring against the upstream
    # `datasets/commands/convert.py` before the command can run.
    @staticmethod
    def __A ( UpperCamelCase__ : ArgumentParser ):
        """Register the `convert` sub-parser and its two path arguments."""
        SCREAMING_SNAKE_CASE : Dict = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=UpperCamelCase__ )
    def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str , *UpperCamelCase__ : Tuple ):
        """Store the source tfds path and the target datasets directory."""
        SCREAMING_SNAKE_CASE : str = get_logger('''datasets-cli/converting''' )
        SCREAMING_SNAKE_CASE : List[str] = tfds_path
        SCREAMING_SNAKE_CASE : Optional[int] = datasets_directory
    def __A ( self : Dict ):
        """Convert every eligible .py file under the tfds path.

        Rewrites imports and tfds idioms line by line (see TO_CONVERT),
        highlights lines needing manual review, writes each converted builder
        into its own output directory, and copies shared utility files next to
        the builders that import them.
        """
        if os.path.isdir(self._tfds_path ):
            SCREAMING_SNAKE_CASE : Dict = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            SCREAMING_SNAKE_CASE : Dict = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        SCREAMING_SNAKE_CASE : str = os.path.abspath(self._datasets_directory )
        self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        SCREAMING_SNAKE_CASE : Any = []
        SCREAMING_SNAKE_CASE : Dict = []
        SCREAMING_SNAKE_CASE : Dict = {}
        if os.path.isdir(self._tfds_path ):
            SCREAMING_SNAKE_CASE : List[str] = os.listdir(UpperCamelCase__ )
        else:
            SCREAMING_SNAKE_CASE : Optional[int] = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f"""Looking at file {f_name}""" )
            SCREAMING_SNAKE_CASE : Any = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
            SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
            # only plain .py dataset scripts are converted
            if not os.path.isfile(UpperCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(UpperCamelCase__ , encoding='''utf-8''' ) as f:
                SCREAMING_SNAKE_CASE : Optional[int] = f.readlines()
            SCREAMING_SNAKE_CASE : Tuple = []
            SCREAMING_SNAKE_CASE : str = False
            SCREAMING_SNAKE_CASE : Optional[Any] = False
            SCREAMING_SNAKE_CASE : str = []
            for line in lines:
                SCREAMING_SNAKE_CASE : List[str] = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    SCREAMING_SNAKE_CASE : List[str] = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    SCREAMING_SNAKE_CASE : Any = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    SCREAMING_SNAKE_CASE : Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # wrap the line in conflict-style markers for manual review
                    SCREAMING_SNAKE_CASE : Optional[Any] = True
                    SCREAMING_SNAKE_CASE : Tuple = list(filter(lambda UpperCamelCase__ : e in out_line , UpperCamelCase__ ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCamelCase__ ) + '''\n''' )
                    out_lines.append(UpperCamelCase__ )
                    out_lines.append(UpperCamelCase__ )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        SCREAMING_SNAKE_CASE : Any = re.sub(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    SCREAMING_SNAKE_CASE : Optional[int] = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , UpperCamelCase__ )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    SCREAMING_SNAKE_CASE : List[Any] = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"""Error converting {out_line.strip()}""" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    SCREAMING_SNAKE_CASE : Optional[int] = True
                out_lines.append(UpperCamelCase__ )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                SCREAMING_SNAKE_CASE : Dict = f_name.replace('''.py''' , '''''' )
                SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
                SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
                os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
                self._logger.info(f"""Adding directory {output_dir}""" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(UpperCamelCase__ )
            if needs_manual_update:
                with_manual_update.append(UpperCamelCase__ )
            with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(UpperCamelCase__ )
            self._logger.info(f"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                SCREAMING_SNAKE_CASE : Tuple = os.path.basename(UpperCamelCase__ )
                SCREAMING_SNAKE_CASE : Any = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(f"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(UpperCamelCase__ , UpperCamelCase__ )
            except KeyError:
                self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 248 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ (unittest.TestCase):
    """Integration test for DiT fine-tuned on RVL-CDIP document classification."""

    @slow
    def test_for_image_classification(self):
        """Run the checkpoint on one demo document and compare logits.

        The method must start with ``test_`` or unittest never discovers it.
        """
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model.to(torch_device )

        from datasets import load_dataset

        dataset = load_dataset('nielsr/rvlcdip-demo' )

        image = dataset['train'][0]['image'].convert('RGB' )

        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits

        # RVL-CDIP has 16 document classes
        expected_shape = torch.Size((1, 1_6) )
        self.assertEqual(logits.shape , expected_shape )

        # reference values recorded from a known-good run of the model
        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
| 645 | """simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645 | 1 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# module logger, used by the config below when defaulting the backbone
logger = logging.get_logger(__name__)

# hub checkpoints with a downloadable config.json for this model type
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    """Configuration for the Conditional DETR object-detection model.

    Stores backbone choice, encoder/decoder sizes, Hungarian-matcher costs and
    loss coefficients. `attribute_map` lets callers use the canonical
    `hidden_size`/`num_attention_heads` names.
    """

    model_type = '''conditional_detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # a timm backbone and an explicit HF backbone config are mutually exclusive
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config, dict):
                # re-hydrate a serialized backbone config into its config class
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Canonical alias for the number of encoder attention heads."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Canonical alias for the model dimension."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR."""

    # minimum torch version known to export this architecture correctly
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input names with their dynamic axes for the exported graph."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        """Default ONNX opset version for the export."""
        return 12
| 70 |
"""Report modified .py files under the top-level sub-dirs passed as arguments.

Example: python ./utils/get_modified_files.py utils src tests examples

Uses git to find the forking point and which files were modified — files not
under git won't be considered. Since the output of this script is fed into
Makefile commands it doesn't print a newline after the results.
"""
import re
import subprocess
import sys

# SHA of the commit where the current branch forked from main
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# files touched since the fork point (one path per line from `git diff --name-only`)
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

# keep only .py files located under the sub-directories given on the command line
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
# no trailing newline: the output is consumed directly by Makefile commands
print(" ".join(relevant_modified_files), end="")
| 597 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# Fix: the logger, the name mapping, and the lazy mapping were all bound to
# `_snake_case`, shadowing each other, while the functions below reference
# `logger`, `FEATURE_EXTRACTOR_MAPPING_NAMES` and `FEATURE_EXTRACTOR_MAPPING`.
logger = logging.get_logger(__name__)

# Maps each model type to the name of its feature-extractor class.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
        ('beit', 'BeitFeatureExtractor'),
        ('chinese_clip', 'ChineseCLIPFeatureExtractor'),
        ('clap', 'ClapFeatureExtractor'),
        ('clip', 'CLIPFeatureExtractor'),
        ('clipseg', 'ViTFeatureExtractor'),
        ('conditional_detr', 'ConditionalDetrFeatureExtractor'),
        ('convnext', 'ConvNextFeatureExtractor'),
        ('cvt', 'ConvNextFeatureExtractor'),
        ('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
        ('data2vec-vision', 'BeitFeatureExtractor'),
        ('deformable_detr', 'DeformableDetrFeatureExtractor'),
        ('deit', 'DeiTFeatureExtractor'),
        ('detr', 'DetrFeatureExtractor'),
        ('dinat', 'ViTFeatureExtractor'),
        ('donut-swin', 'DonutFeatureExtractor'),
        ('dpt', 'DPTFeatureExtractor'),
        ('encodec', 'EncodecFeatureExtractor'),
        ('flava', 'FlavaFeatureExtractor'),
        ('glpn', 'GLPNFeatureExtractor'),
        ('groupvit', 'CLIPFeatureExtractor'),
        ('hubert', 'Wav2Vec2FeatureExtractor'),
        ('imagegpt', 'ImageGPTFeatureExtractor'),
        ('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
        ('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
        ('levit', 'LevitFeatureExtractor'),
        ('maskformer', 'MaskFormerFeatureExtractor'),
        ('mctct', 'MCTCTFeatureExtractor'),
        ('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
        ('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
        ('mobilevit', 'MobileViTFeatureExtractor'),
        ('nat', 'ViTFeatureExtractor'),
        ('owlvit', 'OwlViTFeatureExtractor'),
        ('perceiver', 'PerceiverFeatureExtractor'),
        ('poolformer', 'PoolFormerFeatureExtractor'),
        ('regnet', 'ConvNextFeatureExtractor'),
        ('resnet', 'ConvNextFeatureExtractor'),
        ('segformer', 'SegformerFeatureExtractor'),
        ('sew', 'Wav2Vec2FeatureExtractor'),
        ('sew-d', 'Wav2Vec2FeatureExtractor'),
        ('speech_to_text', 'Speech2TextFeatureExtractor'),
        ('speecht5', 'SpeechT5FeatureExtractor'),
        ('swiftformer', 'ViTFeatureExtractor'),
        ('swin', 'ViTFeatureExtractor'),
        ('swinv2', 'ViTFeatureExtractor'),
        ('table-transformer', 'DetrFeatureExtractor'),
        ('timesformer', 'VideoMAEFeatureExtractor'),
        ('tvlt', 'TvltFeatureExtractor'),
        ('unispeech', 'Wav2Vec2FeatureExtractor'),
        ('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
        ('van', 'ConvNextFeatureExtractor'),
        ('videomae', 'VideoMAEFeatureExtractor'),
        ('vilt', 'ViltFeatureExtractor'),
        ('vit', 'ViTFeatureExtractor'),
        ('vit_mae', 'ViTFeatureExtractor'),
        ('vit_msn', 'ViTFeatureExtractor'),
        ('wav2vec2', 'Wav2Vec2FeatureExtractor'),
        ('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
        ('wavlm', 'Wav2Vec2FeatureExtractor'),
        ('whisper', 'WhisperFeatureExtractor'),
        ('xclip', 'CLIPFeatureExtractor'),
        ('yolos', 'YolosFeatureExtractor'),
    ]
)

# Lazily maps each config class to its feature-extractor class.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature-extractor class object from its class *name*.

    Fix: the body referenced `class_name` while the parameter had been
    renamed, and the function itself is called below as
    `feature_extractor_class_from_name`; names restored.

    Returns the class, or None if it cannot be found.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # Same class name may appear under several model types; keep looking.
                continue

    # Extra content registered dynamically via `register`.
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
) -> Dict:
    """Load the feature-extractor configuration dict for a model.

    Fix: every parameter had been collapsed into a duplicated name (a
    SyntaxError) and the body read an undefined `resolved_config_file`;
    the canonical parameter and local names are restored.

    Returns {} when no feature-extractor config file is found.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead." )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class __SCREAMING_SNAKE_CASE :
    """Auto class that instantiates the right feature extractor for a checkpoint.

    Fix: the two methods shared one obfuscated name (the second shadowed the
    first), the classmethod had duplicate `_a` parameters (a SyntaxError) and
    every local collapsed into one variable; canonical names restored.
    """

    def __init__( self ) -> Dict:
        # Not meant to be instantiated directly.
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature extractor class matching the checkpoint."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs )
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                # Local custom code: make it re-exportable via save_pretrained.
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class -> feature extractor class) pair."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 214 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: the logger and the archive map were bound to the same name
# (`_snake_case`), the second assignment shadowing the first.
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL.
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'huggingface/time-series-transformer-tourism-monthly': (
        'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration for a Time Series Transformer model.

    Fixes: the class inherited from itself (the base should be
    `PretrainedConfig`, imported above), both class attributes shared one
    name, `__init__` had dozens of duplicated `_a` parameters (a
    SyntaxError), and the property the body reads as
    `self._number_of_features` had been renamed away. All names restored.
    """

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: Optional[List[int]] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # None-sentinel instead of a shared mutable default list.
        if lags_sequence is None:
            lags_sequence = [1, 2, 3, 4, 5, 6, 7]

        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            # Rule-of-thumb embedding size per categorical cardinality, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Total width of the per-time-step feature vector fed to the model."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 214 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert gradients of two models are (not) in sync depending on `did_step`.

    Fix: the four parameters were all named `__a` (a SyntaxError);
    restored the names the assert messages already use.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward step with an MSE loss.

    When `do_backward` is False the loss is manually scaled by the
    accumulation steps and back-propagated directly; otherwise the
    accelerator handles the backward pass.

    Fix: duplicated `__a` parameters (a SyntaxError); names restored.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Build a reference model plus an accelerator-prepared copy (and optionally optimizers/schedulers).

    Fix: duplicated `__a` parameters (a SyntaxError) and collapsed locals;
    names restored to those the callers below already use.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        scheduler = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """On a single CPU/GPU, `no_sync` must be a no-op: grads always stay in sync.

    Fix: collapsed local names restored so the body's references
    (`step_model`, `check_model_parameters`, ...) resolve.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """On a distributed setup, `no_sync` must actually delay gradient sync.

    Fix: collapsed local names restored.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """Check `accelerator.accumulate` only syncs grads every 2 steps (or at the end).

    Fix: duplicated `__a` parameters (a SyntaxError) and collapsed locals;
    names restored.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP" (manual scaled backward)
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Check accumulation keeps optimizer/scheduler state aligned with the reference run.

    Fix: duplicated `__a` parameters (a SyntaxError) and collapsed locals;
    names restored.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process, to mirror what prepare() wraps.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """Check GradientState tracks the active dataloader through nested iteration.

    Fix: collapsed local names restored so the two dataloaders are distinct.
    """
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nested iteration over the second dataloader.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Run every sync/accumulation test appropriate for the current distributed setup.

    Fix: the def was obfuscated away from `main` although the bottom of the
    file calls `main()`; the test functions it dispatches to are likewise
    restored to the names used here.
    """
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ", f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs): the spawner looks up this exact name.
    # Fix: the def had been renamed away from `_mp_fn` while its body (and the
    # guard below) call `main()`, which was likewise renamed; names restored.
    main()


if __name__ == "__main__":
    main()
| 37 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Fix: logger and archive map were both bound to `A`, the second
# assignment shadowing the first.
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL.
DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowercase__ ( PretrainedConfig ):
    """DeiT model configuration (defaults match facebook/deit-base-patch16-224).

    Fix: the base class was obfuscated to an unrelated name (should be
    `PretrainedConfig`, imported above) and `__init__` had duplicated
    `_lowercase` parameters (a SyntaxError); canonical names restored.
    """

    model_type = 'deit'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
# NOTE(review): this class reuses the name of the config class above, so it
# shadows it at module level — kept as-is to avoid changing the public name,
# but worth confirming against the original file.
class lowercase__ ( OnnxConfig ):
    """ONNX export configuration for DeiT.

    Fix: the base class should be `OnnxConfig` (imported above) and the two
    properties shared one name (the second shadowed the first); restored the
    canonical OnnxConfig property names.
    """

    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported graph inputs."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        """Absolute tolerance when validating the exported model."""
        return 1E-4
| 475 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __UpperCAmelCase ( ode_func, y0, x0, step_size, x_end ) -> np.array:
    """Integrate y' = ode_func(x, y) from x0 to x_end with Heun's method.

    Fix: all five parameters were duplicated (a SyntaxError) and the body
    referenced undefined names; restored (ode_func, y0, x0, step_size, x_end).

    Returns the array of solution values with y[0] == y0.
    """
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        # Euler predictor step, then trapezoidal corrector.
        y_pred = y[k] + step_size * ode_func(x, y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k] ) + ode_func(x + step_size, y_pred ))
        )
        x += step_size
    return y
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 712 |
'''simple docstring'''
def __UpperCAmelCase ( UpperCamelCase__ :int ) -> bool:
    """Return True when the given integer is even, False otherwise."""
    return UpperCamelCase__ % 2 == 0
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 574 | 0 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ) -> Dict:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _UpperCamelCase ( graph ) -> list[tuple[int, int]]:
    """Return all bridges of an undirected graph given as {vertex: neighbours}.

    Uses Tarjan's low-link DFS. Fix: the body referenced `graph` while the
    parameter had been renamed away, raising NameError; parameter restored.
    """
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                # Edge (at, to) is a bridge if the subtree cannot reach back above it.
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 42 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int):
    """Build a (ksize x ksize) Gabor filter kernel.

    Fix: all six parameters were duplicated (a SyntaxError), an even `ksize`
    was bumped into a dead variable, and the computed values were never
    stored into the output array; the function is also called below as
    `gabor_filter_kernel`. Names restored.

    theta is in degrees; lambd is the sinusoid wavelength; gamma the spatial
    aspect ratio; psi the phase offset.
    """
    # Force an odd kernel size so there is a well-defined centre pixel.
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # Rotation is the same for every pixel — hoist it out of the loops.
    _theta = theta / 180 * np.pi
    cos_theta = np.cos(_theta)
    sin_theta = np.sin(_theta)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # rotate coordinates into the filter's orientation
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py
            # fill kernel: Gaussian envelope times sinusoidal carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: every intermediate was bound to one name while later lines
    # referenced `img`, `gray`, `out` and `kernel_aa` (NameError), and
    # `np.uinta` is not a numpy dtype (should be uint8). Names restored.
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_aa)
    # normalize accumulated responses to the 0-255 range
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 580 | 0 |
import argparse
from collections import defaultdict
import yaml
# Path to the documentation table of contents checked/fixed by this script.
lowerCamelCase_ = "docs/source/en/_toctree.yml"
def UpperCAmelCase_ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =defaultdict(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =[]
SCREAMING_SNAKE_CASE__ =[]
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =new_doc_list
SCREAMING_SNAKE_CASE__ =[key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE__ =[]
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE__ =list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(__UpperCamelCase ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
SCREAMING_SNAKE_CASE__ =sorted(__UpperCamelCase, key=lambda __UpperCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__UpperCamelCase ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(__UpperCamelCase )
# Sort
return overview_doc
def check_scheduler_doc(overwrite=False):
    """Check (and with `overwrite=True`, fix) that the Schedulers TOC section is clean.

    Fixes: the function opened its boolean argument as a file path instead
    of the TOC path constant, its locals had collapsed into one name, and it
    is called at the bottom of the file as `check_scheduler_doc`.
    """
    # `lowerCamelCase_` is the module-level path to _toctree.yml.
    with open(lowerCamelCase_, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(lowerCamelCase_, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def check_pipeline_doc(overwrite=False):
    """Check (and with `overwrite=True`, fix) that the Pipelines TOC section is clean.

    Fixes: the function opened its boolean argument as a file path instead
    of the TOC path constant, its locals had collapsed into one name, and it
    is called at the bottom of the file as `check_pipeline_doc`.
    """
    # `lowerCamelCase_` is the module-level path to _toctree.yml.
    with open(lowerCamelCase_, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(lowerCamelCase_, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
    # Fix: the parser was bound to `lowerCamelCase_` (clobbering the TOC path
    # constant) while the next line referenced an undefined `parser`, and
    # parse_args rebound the same name again.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 588 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
def UpperCAmelCase_ ( image_embeds, text_embeds ):
    """Return the pairwise cosine-similarity matrix between two embedding batches.

    Fixes two defects in the garbled original: both parameters shared the name
    `__UpperCamelCase` (a SyntaxError), and the return statement referenced
    `normalized_text_embeds`, which was never assigned.

    Args:
        image_embeds: tensor of shape (N, D).
        text_embeds: tensor of shape (M, D).

    Returns:
        Tensor of shape (N, M); entry (i, j) is the cosine similarity between
        row i of `image_embeds` and row j of `text_embeds`.
    """
    # L2-normalize each row so the matrix product below yields cosine similarities.
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class __a ( __lowerCamelCase ):
    """CLIP-based safety checker that flags images containing NSFW concepts.

    Reconstructed from a garbled original which could not run: both methods had
    duplicate parameter names (SyntaxError), both were named `__A` so the first
    was shadowed by the second, most assignment targets were destroyed (leaving
    `result_img[...]`, `concept_cos`, `special_scores`, etc. undefined at their
    use sites), and `requires_grad`/`bias` were passed the config object where
    `False` belongs.

    NOTE(review): the base class `__lowerCamelCase` is defined elsewhere in this
    file — presumably an alias of the `PreTrainedModel` imported above; confirm.
    """

    # Attributes expected by the PreTrainedModel machinery; the original bound
    # both to the same name `_A`, so the first was silently lost.
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__( self, config: CLIPConfig ):
        """Build the vision encoder, projection, and fixed concept embeddings.

        The concept/special-care embeddings and their thresholds are plain
        placeholders (`torch.ones`) here; real values come from the checkpoint.
        They are frozen (`requires_grad=False`) — the checker is never trained.
        """
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward( self, clip_input, images ):
        """Score `clip_input` against the NSFW concept embeddings.

        Args:
            clip_input: CLIP-preprocessed image batch fed to the vision model.
            images: the corresponding images, returned unchanged alongside the flags.

        Returns:
            `(images, has_nsfw_concepts)` where `has_nsfw_concepts` is a list of
            bools, one per image.
        """
        # NOTE(review): `cosine_distance` is the intended name of the similarity
        # helper defined just above this class (renamed by obfuscation) — confirm.
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # Always cast to float32: negligible overhead and compatible with bfloat16.
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # Increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images.
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    # A "special care" hit makes the regular concept check stricter.
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx( self, clip_input: torch.FloatTensor, images: torch.FloatTensor ):
        """Vectorized variant of `forward` using only tensor ops (ONNX-exportable)."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # Increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images.
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 588 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.