| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta
        )
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 58 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 58 | 1 |
"""simple docstring"""
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return a peak of a list that first strictly increases and then strictly
    decreases, by recursing on the half that must contain a peak.
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
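# Hedged usage sketch for peak() above (the lists and expected values are
# illustrative, not part of the original module): the input must strictly
# increase and then strictly decrease for the recursion to be valid.
if __name__ == "__main__":
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5  # middle element is the peak
    assert peak([1, 10, 9, 8, 7, 6]) == 10         # peak found after recursing left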
| 58 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style RMS norm: scale only, no shift, computed in float32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # tanh approximation of GELU
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
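# Hedged shape sketch for the FiLM (feature-wise linear modulation) rule
# implemented by TaFiLMLayer above -- x * (1 + scale) + shift, with scale and
# shift predicted from a conditioning embedding. The sizes below are
# illustrative only, matching the default d_model=768 of the decoder.
if __name__ == "__main__":
    film = TaFiLMLayer(in_features=768 * 4, out_features=768)
    x = torch.randn(2, 16, 768)                    # (batch, seq_len, d_model)
    conditioning_emb = torch.randn(2, 1, 768 * 4)  # (batch, 1, d_model * 4)
    assert film(x, conditioning_emb).shape == x.shape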
| 58 | 1 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload is a decodable image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 58 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 58 | 1 |
"""simple docstring"""
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    # try every alignment of the pattern against the text
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
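# Hedged complexity note for naive_pattern_search above: it checks every
# alignment, so the worst case is O(len(s) * len(pattern)) character
# comparisons, e.g. a text of all 'A's against a pattern ending in 'B'.
# Illustrative check (not from the original module):
assert naive_pattern_search("AAAAA", "AAB") == []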
| 58 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )
    model = Pix2StructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 58 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 58 |
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute the kinetic energy KE = 1/2 * mass * velocity**2 of a body."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
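# Hedged worked example for kinetic_energy above (illustrative values, not
# from the original module): KE = 1/2 * m * v**2, so a 10 kg body moving at
# 10 m/s carries 0.5 * 10 * 10 * 10 = 500.0 J; abs() makes direction irrelevant.
if __name__ == "__main__":
    assert kinetic_energy(10, 10) == 500.0
    assert kinetic_energy(10, -10) == 500.0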
| 58 | 1 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestClass(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 58 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 58 | 1 |
"""simple docstring"""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        # Initialize with the size of each starting set; every set has rank 1.
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        # Union by rank; returns False if both items already share a set.
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        # Find the root of a set, compressing the path along the way.
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
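# Hedged usage sketch for DisjointSet above (values illustrative, not from the
# original module): start with three singleton "sets" of sizes 1, 2 and 3;
# merging the first two produces a set of size 3, so max_set becomes 3.
if __name__ == "__main__":
    ds = DisjointSet([1, 2, 3])
    assert ds.merge(0, 1) is True   # sets 0 and 1 join; combined size 1 + 2 = 3
    assert ds.merge(0, 1) is False  # already in the same set
    assert ds.max_set == 3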
| 58 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
snake_case_ : Dict = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(snake_case_)  # the README content assembled in the template string above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 58 |
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 58 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 58 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. A related empirical result, sometimes called the power law, states that perplexity and word error rate are correlated.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__lowerCAmelCase : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=False ) -> Optional[Any]:
'''simple docstring'''
if concatenate_texts:
return compute_measures(_lowercase , _lowercase )["wer"]
else:
snake_case_ : List[str] = 0
snake_case_ : Optional[int] = 0
for prediction, reference in zip(_lowercase , _lowercase ):
snake_case_ : Optional[Any] = compute_measures(_lowercase , _lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 58 | 1 |
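Working the docstring example through the formula shows where 0.5 comes from. One minimum-cost word alignment is sketched below (the counts, not the exact alignment, are what jiwer's measures expose):

# Reference 1: "this is the reference"    Prediction 1: "this is the prediction"
#   -> 3 hits, 1 substitution ("reference" -> "prediction")
# Reference 2: "there is another one"     Prediction 2: "there is an other sample"
#   -> 2 hits, 2 substitutions ("another" -> "an", "one" -> "other"),
#      1 insertion ("sample")
substitutions, deletions, insertions = 3, 0, 1
reference_words = 4 + 4  # N = S + D + C
print((substitutions + deletions + insertions) / reference_words)  # 0.5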
"""simple docstring"""
from jiwer import compute_measures
import datasets
__lowerCAmelCase : Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCAmelCase : Union[str, Any] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__lowerCAmelCase : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=False ) -> Optional[Any]:
'''simple docstring'''
if concatenate_texts:
return compute_measures(_lowercase , _lowercase )["wer"]
else:
snake_case_ : List[str] = 0
snake_case_ : Optional[int] = 0
for prediction, reference in zip(_lowercase , _lowercase ):
snake_case_ : Optional[Any] = compute_measures(_lowercase , _lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=3 , _lowercase=2_2_4 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Union[str, Any] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : Optional[Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Union[str, Any] = do_resize
snake_case_ : Optional[int] = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : Dict = image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 58 | 1 |
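The shape assertions above all reduce to the same contract: whatever the input format (PIL, NumPy, or torch), the processor returns a (batch, channels, height, width) tensor at the configured size. A standalone check with ViTImageProcessor, assuming a transformers version where `size` is a height/width dict as in the tester defaults:

import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor(size={"height": 18, "width": 18})
image = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)  # one RGB image

pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])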
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''bridgetower_vision_model'''
def __init__( self , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=3 , _lowercase=1_6 , _lowercase=2_8_8 , _lowercase=1 , _lowercase=1E-05 , _lowercase=False , _lowercase=True , _lowercase=False , **_lowercase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : Union[str, Any] = num_channels
snake_case_ : List[Any] = patch_size
snake_case_ : int = image_size
snake_case_ : Tuple = initializer_factor
snake_case_ : List[str] = layer_norm_eps
snake_case_ : Optional[int] = stop_gradient
snake_case_ : Optional[Any] = share_layernorm
snake_case_ : Tuple = remove_last_layer
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> "PretrainedConfig":
'''simple docstring'''
snake_case_ , snake_case_ : Any = cls.get_config_dict(_lowercase , **_lowercase )
if config_dict.get("""model_type""" ) == "bridgetower":
snake_case_ : Any = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''bridgetower_text_model'''
def __init__( self , _lowercase=5_0_2_6_5 , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=1 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_4 , _lowercase=1 , _lowercase=1E-05 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase="absolute" , _lowercase=True , **_lowercase , ) -> str:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = vocab_size
snake_case_ : Dict = hidden_size
snake_case_ : int = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : Any = hidden_act
snake_case_ : Dict = initializer_factor
snake_case_ : List[Any] = intermediate_size
snake_case_ : Dict = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : int = position_embedding_type
snake_case_ : Optional[int] = use_cache
snake_case_ : List[str] = pad_token_id
snake_case_ : int = bos_token_id
snake_case_ : Dict = eos_token_id
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> "PretrainedConfig":
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.get_config_dict(_lowercase , **_lowercase )
if config_dict.get("""model_type""" ) == "bridgetower":
snake_case_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowercase , **_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''bridgetower'''
def __init__( self , _lowercase=True , _lowercase="gelu" , _lowercase=7_6_8 , _lowercase=1 , _lowercase=1E-05 , _lowercase=False , _lowercase="add" , _lowercase=1_2 , _lowercase=6 , _lowercase=False , _lowercase=False , _lowercase=None , _lowercase=None , **_lowercase , ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = kwargs.pop("""text_config_dict""" , _lowercase )
snake_case_ : List[str] = kwargs.pop("""vision_config_dict""" , _lowercase )
super().__init__(**_lowercase )
snake_case_ : str = share_cross_modal_transformer_layers
snake_case_ : int = hidden_act
snake_case_ : Tuple = hidden_size
snake_case_ : List[Any] = initializer_factor
snake_case_ : Dict = layer_norm_eps
snake_case_ : str = share_link_tower_layers
snake_case_ : int = link_tower_type
snake_case_ : str = num_attention_heads
snake_case_ : int = num_hidden_layers
snake_case_ : Tuple = tie_word_embeddings
snake_case_ : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
snake_case_ : Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
snake_case_ : Optional[int] = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
snake_case_ : str = BridgeTowerTextConfig(**_lowercase )
snake_case_ : Any = BridgeTowerVisionConfig(**_lowercase )
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , _lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : int = self.text_config.to_dict()
snake_case_ : str = self.vision_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
| 58 |
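The BridgeTower configuration above follows the common composite-config pattern: a top-level config owns one sub-config per modality, falls back to defaults when a sub-config is missing, and re-nests the sub-configs in to_dict so serialization round-trips. A stripped-down sketch of the pattern with hypothetical class names (plain Python, no transformers dependency):

import copy

class SubConfig:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class CompositeConfig:
    model_type = "composite"

    def __init__(self, text_config=None, vision_config=None):
        # Missing sub-configs fall back to defaults, as in the snippet above.
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output

print(CompositeConfig(text_config={"hidden_size": 512}).to_dict())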
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
        snake_case_ : List[Any] = max((len(str(item ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
            # i < node.level - When the node's level is less than `i`, decrement `i`.
            # node.forward[i].key < key - Jumping to a node whose key is greater than
            #                             or equal to the searched key would skip the
            #                             searched key entirely.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
snake_case_ : str = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Optional[Any] = node.forward[0]
snake_case_ : int = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = SkipList()
assert skip_list.find("""Some key""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(__UpperCamelCase , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(__UpperCamelCase , __UpperCamelCase )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 58 | 1 |
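The random_level routine above draws node heights from a geometric distribution: a node reaches level k with probability p**(k - 1), so with p = 0.5 about half the nodes stay at level 1, a quarter reach level 2, and so on, which is what keeps the expected search cost logarithmic. A quick empirical check of that distribution (standalone, not using the class above):

import random

def random_level(p=0.5, max_level=16):
    level = 1
    while random.random() < p and level < max_level:
        level += 1
    return level

trials = 100_000
counts = {}
for _ in range(trials):
    lvl = random_level()
    counts[lvl] = counts.get(lvl, 0) + 1

for lvl in sorted(counts)[:4]:
    print(lvl, round(counts[lvl] / trials, 3))  # ~0.5, ~0.25, ~0.125, ~0.0625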
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=3 , _lowercase=2_2_4 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Union[str, Any] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : Optional[Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Union[str, Any] = do_resize
snake_case_ : Optional[int] = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : Dict = image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 58 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[Any] = '''examples/'''
__lowerCAmelCase : Union[str, Any] = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__lowerCAmelCase : Union[str, Any] = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__lowerCAmelCase : List[Any] = '''README.md'''
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : Any = f.read()
snake_case_ , snake_case_ : Optional[int] = REPLACE_PATTERNS[pattern]
snake_case_ : Union[str, Any] = replace.replace("""VERSION""" , __UpperCamelCase )
snake_case_ : List[Any] = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="""examples""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : int=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = """🤗 Transformers currently provides the following architectures"""
snake_case_ : Union[str, Any] = """1. Want to contribute a new model?"""
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : str = f.readlines()
# Find the start of the list.
snake_case_ : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case_ : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
snake_case_ : Any = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
snake_case_ : Any = f.read()
snake_case_ : Tuple = REPLACE_PATTERNS["""init"""][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str=False ):
'''simple docstring'''
snake_case_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
snake_case_ : str = default_version.base_version
elif patch:
snake_case_ : str = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
snake_case_ : str = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
snake_case_ : int = input(F'Which version are you releasing? [{default_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Optional[int] = default_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = get_version()
snake_case_ : str = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
snake_case_ : Tuple = current_version.base_version
# Check with the user we got that right.
snake_case_ : Optional[int] = input(F'Which version are we developing now? [{dev_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Dict = dev_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__lowerCAmelCase : str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 58 | 1 |
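Each entry in REPLACE_PATTERNS pairs a compiled regex with a replacement template whose VERSION placeholder is substituted before re.sub runs; that is all the update function does around the file I/O. A minimal sketch of the 'init' pattern applied to a fake __init__.py body:

import re

re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
replace = '__version__ = "VERSION"\n'.replace("VERSION", "0.19.0")

code = 'from .pipelines import DiffusionPipeline\n__version__ = "0.18.0.dev0"\n'
print(re_pattern.sub(replace, code))
# from .pipelines import DiffusionPipeline
# __version__ = "0.19.0"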
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = psutil.Process()
snake_case_ : Tuple = False
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Dict = -1
while True:
snake_case_ : Any = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Any = True
snake_case_ : Union[str, Any] = threading.Thread(target=self.peak_monitor )
snake_case_ : Union[str, Any] = True
self.thread.start()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = False
self.thread.join()
return self.cpu_memory_peak
__lowerCAmelCase : List[Any] = PeakCPUMemory()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[Any] = {"""time""": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
snake_case_ : Optional[Any] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
snake_case_ : int = torch.cuda.memory_allocated(__UpperCamelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = {"""time""": time.time() - start_measures["""time"""]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
snake_case_ : Optional[int] = (psutil.Process().memory_info().rss - start_measures["""cpu"""]) / 2**2_0
snake_case_ : Optional[Any] = (cpu_peak_tracker.stop() - start_measures["""cpu"""]) / 2**2_0
# GPU mem
for i in range(torch.cuda.device_count() ):
snake_case_ : List[Any] = (torch.cuda.memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**2_0
snake_case_ : Optional[Any] = (torch.cuda.max_memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**2_0
return measures
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Dict ):
'''simple docstring'''
print(F'{description}:' )
print(F'- Time: {measures["time"]:.2f}s' )
for i in range(torch.cuda.device_count() ):
print(F'- GPU {i} allocated: {measures[str(__UpperCamelCase )]:.2f}MiB' )
snake_case_ : List[Any] = measures[F'{i}-peak']
print(F'- GPU {i} peak: {peak:.2f}MiB' )
print(F'- CPU RAM allocated: {measures["cpu"]:.2f}MiB' )
print(F'- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB' )
| 58 |
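The monitor class above exists because psutil only reports current RSS, not a high-water mark, so the peak has to be sampled from a busy background thread while the measured code runs. A self-contained sketch of that idea (a hypothetical helper, not the API above):

import threading
import psutil

def measure_peak_rss(fn):
    """Run fn() while polling RSS from a daemon thread; return (result, peak bytes)."""
    process = psutil.Process()
    peak = [process.memory_info().rss]
    running = [True]

    def monitor():
        while running[0]:
            peak[0] = max(peak[0], process.memory_info().rss)

    thread = threading.Thread(target=monitor, daemon=True)
    thread.start()
    result = fn()
    running[0] = False
    thread.join()
    return result, peak[0]

_, peak_bytes = measure_peak_rss(lambda: [bytearray(10**7) for _ in range(10)])
print(f"peak RSS: {peak_bytes / 2**20:.2f}MiB")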
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 | 1 |
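Plugging representative values into c = sqrt(bulk_modulus / density) is a quick sanity check: for water, with a bulk modulus of roughly 2.2 GPa and a density of 1000 kg/m^3, the formula gives about 1483 m/s, close to the measured speed of sound in water:

# Approximate textbook values for water at room temperature.
bulk_modulus = 2.2e9  # Pa
density = 1.0e3       # kg/m^3
print((bulk_modulus / density) ** 0.5)  # ~1483.2 m/s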
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=[1, 1, 2] , _lowercase=1 , _lowercase=3_2 , _lowercase=4 , _lowercase=8 , _lowercase=3_7 , _lowercase="gelu_new" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=5_1_2 , _lowercase=3 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , _lowercase=False , ) -> str:
'''simple docstring'''
snake_case_ : Any = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : Tuple = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Any = use_input_mask
snake_case_ : List[Any] = use_token_type_ids
snake_case_ : Any = use_labels
snake_case_ : Tuple = vocab_size
snake_case_ : Tuple = block_sizes
snake_case_ : Optional[int] = num_decoder_layers
snake_case_ : Union[str, Any] = d_model
snake_case_ : Any = n_head
snake_case_ : Optional[int] = d_head
snake_case_ : Dict = d_inner
snake_case_ : List[Any] = hidden_act
snake_case_ : str = hidden_dropout
snake_case_ : int = attention_dropout
snake_case_ : List[Any] = activation_dropout
snake_case_ : List[str] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = 2
snake_case_ : Any = num_labels
snake_case_ : int = num_choices
snake_case_ : Union[str, Any] = scope
snake_case_ : int = initializer_std
# Used in the tests to check the size of the first attention layer
snake_case_ : Union[str, Any] = n_head
# Used in the tests to check the size of the first hidden state
snake_case_ : List[str] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
snake_case_ : Tuple = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
snake_case_ : int = self.num_hidden_layers + 2
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : int = None
if self.use_input_mask:
snake_case_ : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : int = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Dict = None
snake_case_ : Dict = None
snake_case_ : Optional[Any] = None
if self.use_labels:
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : int = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : Union[str, Any] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = TFFunnelModel(config=_lowercase )
snake_case_ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ : Dict = model(_lowercase )
snake_case_ : List[Any] = [input_ids, input_mask]
snake_case_ : List[str] = model(_lowercase )
snake_case_ : List[Any] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case_ : Optional[int] = False
snake_case_ : List[str] = TFFunnelModel(config=_lowercase )
snake_case_ : int = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case_ : List[Any] = False
snake_case_ : Optional[int] = TFFunnelModel(config=_lowercase )
snake_case_ : Optional[Any] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = TFFunnelBaseModel(config=_lowercase )
snake_case_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ : Tuple = model(_lowercase )
snake_case_ : Optional[Any] = [input_ids, input_mask]
snake_case_ : Any = model(_lowercase )
snake_case_ : Any = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
snake_case_ : List[Any] = False
snake_case_ : List[str] = TFFunnelBaseModel(config=_lowercase )
snake_case_ : str = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
snake_case_ : List[str] = False
snake_case_ : Any = TFFunnelBaseModel(config=_lowercase )
snake_case_ : List[str] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = TFFunnelForPreTraining(config=_lowercase )
snake_case_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ : List[Any] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = TFFunnelForMaskedLM(config=_lowercase )
snake_case_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ : Optional[int] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.num_labels
snake_case_ : List[str] = TFFunnelForSequenceClassification(config=_lowercase )
snake_case_ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ : Union[str, Any] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = self.num_choices
snake_case_ : List[str] = TFFunnelForMultipleChoice(config=_lowercase )
snake_case_ : Union[str, Any] = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
snake_case_ : Optional[Any] = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
snake_case_ : Union[str, Any] = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
snake_case_ : int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
snake_case_ : str = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.num_labels
snake_case_ : int = TFFunnelForTokenClassification(config=_lowercase )
snake_case_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ : Dict = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = TFFunnelForQuestionAnswering(config=_lowercase )
snake_case_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ : Union[str, Any] = model(_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = config_and_inputs
snake_case_ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = TFFunnelModelTester(self )
snake_case_ : Dict = ConfigTester(self , config_class=_lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
@require_tf
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = TFFunnelModelTester(self , base=_lowercase )
snake_case_ : List[str] = ConfigTester(self , config_class=_lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
| 58 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
snake_case_ : str = precision
snake_case_ : Any = ceil(precision / 1_4 )
snake_case_ : Dict = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
snake_case_ : Optional[Any] = 1
snake_case_ : List[str] = 1_3_5_9_1_4_0_9
snake_case_ : Optional[int] = Decimal(__UpperCamelCase )
for k in range(1 , __UpperCamelCase ):
snake_case_ : Any = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCamelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__lowerCAmelCase : int = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 58 | 1 |
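For reference, a de-obfuscated sketch of the Chudnovsky iteration above; the function name chudnovsky_pi is mine, while the constants and the roughly-14-digits-per-term step size come straight from the sample.

from decimal import Decimal, getcontext
from math import ceil, factorial

def chudnovsky_pi(precision: int) -> str:
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term adds ~14 correct digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]  # drop the last, possibly rounded digit

print(chudnovsky_pi(50))  # 3.141592653589793238462643383279502884197169399375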
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''CLIPImageProcessor'''
_lowerCamelCase = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , _lowercase=None , _lowercase=None , **_lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _lowercase , )
snake_case_ : List[str] = kwargs.pop("""feature_extractor""" )
snake_case_ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_lowercase , _lowercase )
def __call__( self , _lowercase=None , _lowercase=None , _lowercase=None , **_lowercase ) -> str:
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case_ : Any = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
if images is not None:
snake_case_ : List[Any] = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase )
if text is not None and images is not None:
snake_case_ : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = self.tokenizer.model_input_names
snake_case_ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _lowercase , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _lowercase , )
return self.image_processor
| 58 |
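End-to-end usage of a processor like the one above, assuming the standard openai/clip-vit-base-patch32 checkpoint and the usual COCO demo image; note how pixel_values from the image processor are merged into the tokenizer encoding, mirroring __call__ above.

import requests
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image,
    return_tensors="pt", padding=True,
)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']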
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Any = torch.exp(__UpperCamelCase )
snake_case_ : Optional[int] = torch.sum(__UpperCamelCase , dim=1 ) # sum of exp(x_i)
snake_case_ : str = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(__UpperCamelCase ) - B / A
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = config.output_attentions
snake_case_ : str = config.output_hidden_states
snake_case_ : List[str] = nn.ModuleList([BertLayer(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Tuple = nn.ModuleList([BertHighway(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
if (type(_lowercase ) is float) or (type(_lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ : Dict = x
else:
snake_case_ : Union[str, Any] = x
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Any:
'''simple docstring'''
snake_case_ : str = ()
snake_case_ : str = ()
snake_case_ : List[str] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ : int = all_hidden_states + (hidden_states,)
snake_case_ : Any = layer_module(
_lowercase , _lowercase , head_mask[i] , _lowercase , _lowercase )
snake_case_ : Dict = layer_outputs[0]
if self.output_attentions:
snake_case_ : str = all_attentions + (layer_outputs[1],)
snake_case_ : Optional[int] = (hidden_states,)
if self.output_hidden_states:
snake_case_ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : int = current_outputs + (all_attentions,)
snake_case_ : Optional[Any] = self.highway[i](_lowercase )
# logits, pooled_output
if not self.training:
snake_case_ : Tuple = highway_exit[0]
snake_case_ : List[str] = entropy(_lowercase )
snake_case_ : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_lowercase , i + 1 )
else:
snake_case_ : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ : Dict = all_hidden_states + (hidden_states,)
snake_case_ : str = (hidden_states,)
if self.output_hidden_states:
snake_case_ : List[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Union[str, Any] = outputs + (all_attentions,)
snake_case_ : List[str] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config
snake_case_ : int = BertEmbeddings(_lowercase )
snake_case_ : Tuple = DeeBertEncoder(_lowercase )
snake_case_ : int = BertPooler(_lowercase )
self.init_weights()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = value
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_lowercase )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
snake_case_ : Dict = input_ids.size()
elif inputs_embeds is not None:
snake_case_ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
snake_case_ : int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ : Dict = torch.ones(_lowercase , device=_lowercase )
if encoder_attention_mask is None:
snake_case_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
snake_case_ : Any = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ : Any = encoder_attention_mask[:, None, None, :]
snake_case_ : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ : List[str] = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ : int = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
snake_case_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
snake_case_ : List[str] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
snake_case_ : Optional[Any] = encoder_outputs[0]
snake_case_ : Union[str, Any] = self.pooler(_lowercase )
snake_case_ : Optional[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = message
snake_case_ : str = exit_layer # start from 1!
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : str = BertPooler(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Dict = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = encoder_outputs[0]
snake_case_ : List[Any] = self.pooler(_lowercase )
# "return" pooler_output
# BertModel
snake_case_ : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ : Union[str, Any] = bmodel_output[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : List[str] = self.classifier(_lowercase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : Tuple = config.num_hidden_layers
snake_case_ : Any = DeeBertModel(_lowercase )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> int:
'''simple docstring'''
snake_case_ : int = self.num_layers
try:
snake_case_ : Any = self.bert(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ : str = outputs[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Optional[int] = e.message
snake_case_ : Dict = e.exit_layer
snake_case_ : Optional[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(_lowercase )
snake_case_ : int = []
snake_case_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : List[Any] = MSELoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : str = (loss,) + outputs
if not self.training:
snake_case_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 58 | 1 |
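The entropy helper at the top of this DeeBERT sample computes log(sum exp(x)) - sum(x * exp(x)) / sum(exp(x)), which is exactly the Shannon entropy of softmax(x); an exit ramp fires when this value drops below the layer's early_exit_entropy threshold. A small sketch verifying the identity (the function name is mine):

import torch

def entropy_from_logits(logits: torch.Tensor) -> torch.Tensor:
    exp_x = torch.exp(logits)
    a = exp_x.sum(dim=1)             # sum of exp(x_i)
    b = (logits * exp_x).sum(dim=1)  # sum of x_i * exp(x_i)
    return torch.log(a) - b / a

logits = torch.randn(2, 5)
probs = torch.softmax(logits, dim=1)
reference = -(probs * probs.log()).sum(dim=1)  # -sum p_i * log p_i
assert torch.allclose(entropy_from_logits(logits), reference, atol=1e-6)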
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
__lowerCAmelCase : Any = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[int] = [line.strip() for line in open(__UpperCamelCase , """r""" ).readlines()]
snake_case_ : List[str] = []
if args.gold_data_mode == "qa":
snake_case_ : Optional[int] = pd.read_csv(__UpperCamelCase , sep="""\t""" , header=__UpperCamelCase )
for answer_list in data[1]:
snake_case_ : Dict = ast.literal_eval(__UpperCamelCase )
answers.append(__UpperCamelCase )
else:
snake_case_ : Union[str, Any] = [line.strip() for line in open(__UpperCamelCase , """r""" ).readlines()]
snake_case_ : Dict = [[reference] for reference in references]
snake_case_ : str = 0
for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Union[str, Any] = 100.0 * em / total
snake_case_ : Optional[Any] = 100.0 * fa / total
logger.info(F'F1: {fa:.2f}' )
logger.info(F'EM: {em:.2f}' )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = args.k
snake_case_ : int = [line.strip() for line in open(__UpperCamelCase , """r""" ).readlines()]
snake_case_ : Dict = [line.strip() for line in open(__UpperCamelCase , """r""" ).readlines()]
snake_case_ : Any = 0
for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Dict = set(hypo.split("""\t""" )[:k] )
snake_case_ : List[Any] = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
snake_case_ : Dict = 100.0 * em / total
logger.info(F'Precision@{k}: {em: .2f}' )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
def strip_title(__UpperCamelCase : str ):
if title.startswith("""\"""" ):
snake_case_ : Union[str, Any] = title[1:]
if title.endswith("""\"""" ):
snake_case_ : Union[str, Any] = title[:-1]
return title
snake_case_ : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase , truncation=__UpperCamelCase , )["""input_ids"""].to(args.device )
snake_case_ : int = rag_model.rag.question_encoder(__UpperCamelCase )
snake_case_ : Union[str, Any] = question_enc_outputs[0]
snake_case_ : Dict = rag_model.retriever(
__UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
snake_case_ : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
snake_case_ : Optional[Any] = []
for docs in all_docs:
snake_case_ : List[Any] = [strip_title(__UpperCamelCase ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(__UpperCamelCase ) )
return provenance_strings
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
snake_case_ : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase , truncation=__UpperCamelCase )
snake_case_ : List[Any] = inputs_dict.input_ids.to(args.device )
snake_case_ : Optional[int] = inputs_dict.attention_mask.to(args.device )
snake_case_ : Any = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
snake_case_ : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase , __UpperCamelCase ):
logger.info("""Q: {} - A: {}""".format(__UpperCamelCase , __UpperCamelCase ) )
return answers
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__UpperCamelCase , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=__UpperCamelCase , choices=["""exact""", """compressed""", """legacy"""] , type=__UpperCamelCase , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=__UpperCamelCase , type=__UpperCamelCase , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=__UpperCamelCase , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__UpperCamelCase , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=__UpperCamelCase , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=__UpperCamelCase , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=__UpperCamelCase , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=__UpperCamelCase , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=__UpperCamelCase , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=__UpperCamelCase , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=5_0 , type=__UpperCamelCase , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
snake_case_ : Dict = parser.parse_args()
snake_case_ : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
if args.model_type is None:
snake_case_ : Any = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
snake_case_ : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
snake_case_ : Tuple = args.n_docs
if args.index_name is not None:
snake_case_ : Tuple = args.index_name
if args.index_path is not None:
snake_case_ : Any = args.index_path
else:
snake_case_ : Optional[Any] = BartForConditionalGeneration
snake_case_ : int = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , __UpperCamelCase )
snake_case_ : Union[str, Any] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
snake_case_ : int = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(__UpperCamelCase ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
snake_case_ : Any = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
snake_case_ : Any = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase )
model.retriever.init_retrieval()
else:
snake_case_ : int = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
snake_case_ : List[Any] = []
for line in tqdm(__UpperCamelCase ):
questions.append(line.strip() )
if len(__UpperCamelCase ) == args.eval_batch_size:
snake_case_ : Tuple = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write("""\n""".join(__UpperCamelCase ) + """\n""" )
preds_file.flush()
snake_case_ : Dict = []
if len(__UpperCamelCase ) > 0:
snake_case_ : List[str] = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write("""\n""".join(__UpperCamelCase ) )
preds_file.flush()
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = get_args()
main(args)
| 58 |
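The evaluation helpers above credit a prediction with the maximum metric value over all reference answers. A self-contained sketch of that convention, using SQuAD-style normalization (my assumption here; the exact normalization lives in utils_rag, which is not shown):

import re
import string

def normalize(text: str) -> str:
    # lowercase, drop punctuation and articles, collapse whitespace
    text = "".join(ch for ch in text.lower() if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

def exact_match_score(prediction: str, ground_truth: str) -> bool:
    return normalize(prediction) == normalize(ground_truth)

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # a prediction is credited if it matches ANY of the references
    return max(metric_fn(prediction, gt) for gt in ground_truths)

print(metric_max_over_ground_truths(exact_match_score, "The Eiffel Tower", ["eiffel tower", "Paris"]))  # True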
"""simple docstring"""
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ):
'''simple docstring'''
return (-y * np.log(__UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[int] = np.dot(__UpperCamelCase , __UpperCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(__UpperCamelCase ) ) )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int=7_0_0_0_0 ):
'''simple docstring'''
snake_case_ : Dict = np.zeros(x.shape[1] )
for iterations in range(__UpperCamelCase ):
snake_case_ : Any = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Optional[Any] = np.dot(x.T , h - y ) / y.size
snake_case_ : str = theta - alpha * gradient # updating the weights
snake_case_ : int = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Dict = cost_function(__UpperCamelCase , __UpperCamelCase )
if iterations % 1_0_0 == 0:
print(F'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
__lowerCAmelCase : Any = datasets.load_iris()
__lowerCAmelCase : List[Any] = iris.data[:, :2]
__lowerCAmelCase : Tuple = (iris.target != 0) * 1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : List[Any] = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
return sigmoid_function(
np.dot(__UpperCamelCase , __UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
(xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
(xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
(xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
grid = np.c_[xxa.ravel(), xxb.ravel()]
probs = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 58 | 1 |
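A runnable condensation of the training loop above, with descriptive names; the two-blob toy data is mine, standing in for the iris example.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for _ in range(max_iterations):
        h = sigmoid(x @ theta)
        gradient = x.T @ (h - y) / y.size  # gradient of the mean cross-entropy
        theta = theta - alpha * gradient   # weight update
    return theta

rng = np.random.default_rng(0)
x = np.vstack([rng.normal(-2.0, 1.0, (50, 2)), rng.normal(2.0, 1.0, (50, 2))])
y = np.array([0] * 50 + [1] * 50)
theta = logistic_reg(0.1, x, y, max_iterations=1000)
accuracy = ((sigmoid(x @ theta) > 0.5).astype(int) == y).mean()
print("theta:", theta, "train accuracy:", accuracy)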
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : list[list[int]] = []
create_all_state(1 , __UpperCamelCase , __UpperCamelCase , [] , __UpperCamelCase )
return result
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[int] , __UpperCamelCase : list[list[int]] , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__UpperCamelCase , total_number - level + 2 ):
current_list.append(__UpperCamelCase )
create_all_state(i + 1 , __UpperCamelCase , level - 1 , __UpperCamelCase , __UpperCamelCase )
current_list.pop()
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] ):
'''simple docstring'''
for i in total_list:
print(*__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Any = 4
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Dict = generate_all_combinations(n, k)
print_all_state(total_list)
| 58 |
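The recursion above is the standard extend/recurse/pop backtracking scheme; here is a clean sketch of it, cross-checked against itertools.combinations.

from __future__ import annotations

from itertools import combinations

def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []

    def backtrack(start: int, current: list[int]) -> None:
        if len(current) == k:
            result.append(current[:])  # snapshot the current state
            return
        for i in range(start, n + 1):
            current.append(i)
            backtrack(i + 1, current)
            current.pop()  # undo the choice before trying the next one

    backtrack(1, [])
    return result

assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]
print(generate_all_combinations(4, 2))  # [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]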
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCAmelCase : Tuple = '''scheduler_config.json'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['''dtype''']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase = None , _lowercase = None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.load_config(
pretrained_model_name_or_path=_lowercase , subfolder=_lowercase , return_unused_kwargs=_lowercase , **_lowercase , )
snake_case_ , snake_case_ : Dict = cls.from_config(_lowercase , return_unused_kwargs=_lowercase , **_lowercase )
if hasattr(_lowercase , """create_state""" ) and getattr(_lowercase , """has_state""" , _lowercase ):
snake_case_ : Any = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
self.save_config(save_directory=_lowercase , push_to_hub=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case_ : str = importlib.import_module(__name__.split(""".""" )[0] )
snake_case_ : Optional[int] = [
getattr(_lowercase , _lowercase ) for c in compatible_classes_str if hasattr(_lowercase , _lowercase )
]
return compatible_classes
def __lowerCAmelCase ( __UpperCamelCase : jnp.ndarray , __UpperCamelCase : Tuple[int] ):
'''simple docstring'''
assert len(__UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__UpperCamelCase ) - x.ndim) ) , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Any=0.999 , __UpperCamelCase : Optional[int]=jnp.floataa ):
'''simple docstring'''
def alpha_bar(__UpperCamelCase : Optional[int] ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
snake_case_ : Optional[Any] = []
for i in range(__UpperCamelCase ):
snake_case_ : Dict = i / num_diffusion_timesteps
snake_case_ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__UpperCamelCase ) / alpha_bar(__UpperCamelCase ) , __UpperCamelCase ) )
return jnp.array(__UpperCamelCase , dtype=__UpperCamelCase )
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
@classmethod
def UpperCAmelCase__ ( cls , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Any = scheduler.config
if config.trained_betas is not None:
snake_case_ : Optional[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
snake_case_ : int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case_ : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case_ : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
snake_case_ : Optional[Any] = 1.0 - betas
snake_case_ : Any = jnp.cumprod(_lowercase , axis=0 )
return cls(
alphas=_lowercase , betas=_lowercase , alphas_cumprod=_lowercase , )
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ : Tuple = state.alphas_cumprod
snake_case_ : Optional[int] = alphas_cumprod[timesteps] ** 0.5
snake_case_ : Dict = sqrt_alpha_prod.flatten()
snake_case_ : int = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
snake_case_ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
snake_case_ : Dict = sqrt_one_minus_alpha_prod.flatten()
snake_case_ : Tuple = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : str = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 58 | 1 |
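The add_noise path above implements the usual forward-diffusion closed form x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps. A numpy sketch under the linear schedule, reusing the beta_start/beta_end defaults seen elsewhere in this corpus (0.0001 and 0.02); the 2-D reshape stands in for broadcast_to_shape_from_left and is my simplification.

import numpy as np

num_train_timesteps = 1000
betas = np.linspace(0.0001, 0.02, num_train_timesteps)
alphas_cumprod = np.cumprod(1.0 - betas)

def add_noise(original_samples, noise, timesteps):
    sqrt_alpha_prod = np.sqrt(alphas_cumprod[timesteps]).reshape(-1, 1)
    sqrt_one_minus = np.sqrt(1.0 - alphas_cumprod[timesteps]).reshape(-1, 1)
    return sqrt_alpha_prod * original_samples + sqrt_one_minus * noise

x0 = np.ones((2, 4))
eps = np.random.default_rng(0).standard_normal((2, 4))
print(add_noise(x0, eps, np.array([10, 900])))  # lightly vs. heavily noised rows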
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = FunnelConfig.from_json_file(__UpperCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
snake_case_ : Dict = FunnelBaseModel(__UpperCamelCase ) if base_model else FunnelModel(__UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , __UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 58 |
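A usage sketch for the converter defined above; all paths are placeholders, and the fourth positional argument mirrors the --base_model flag (keep only the encoder, no decoder).

convert_tf_checkpoint_to_pytorch(
    "funnel_tf/model.ckpt",         # tf_checkpoint_path (placeholder)
    "funnel_tf/config.json",        # config_file (placeholder)
    "funnel_pt/pytorch_model.bin",  # pytorch_dump_path (placeholder)
    True,                           # base_model
)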
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 58 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
snake_case_ : str = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
sd_pipe.set_scheduler("""sample_euler""" )
snake_case_ : List[str] = """A painting of a squirrel eating a burger"""
snake_case_ : Optional[int] = torch.manual_seed(0 )
snake_case_ : List[str] = sd_pipe([prompt] , generator=_lowercase , guidance_scale=9.0 , num_inference_steps=2_0 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ : Dict = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : int = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ : Optional[int] = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
sd_pipe.set_scheduler("""sample_euler""" )
snake_case_ : List[Any] = """A painting of a squirrel eating a burger"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = sd_pipe([prompt] , generator=_lowercase , guidance_scale=9.0 , num_inference_steps=2_0 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ : Any = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ : Union[str, Any] = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
snake_case_ : List[str] = """A painting of a squirrel eating a burger"""
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : Any = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=7.5 , num_inference_steps=1_5 , output_type="""np""" , use_karras_sigmas=_lowercase , )
snake_case_ : str = output.images
snake_case_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ : Union[str, Any] = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 58 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[int] ):
'''simple docstring'''
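# 1. Validate that an edge exists between the current and next vertices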
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : list[int] , __UpperCamelCase : int ):
'''simple docstring'''
if curr_ind == len(__UpperCamelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__UpperCamelCase ) ):
if valid_connection(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
# Insert current vertex into path as next transition
snake_case_ : List[str] = next_ver
# Validate created path
if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , curr_ind + 1 ):
return True
# Backtrack
snake_case_ : Tuple = -1
return False
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int = 0 ):
'''simple docstring'''
snake_case_ : Tuple = [-1] * (len(__UpperCamelCase ) + 1)
# initialize start and end of path with starting index
snake_case_ : Optional[int] = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , 1 ) else []
| 58 | 1 |
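A de-obfuscated, self-contained version of the backtracking search above, followed by a small example graph that does contain a Hamiltonian cycle (the graph is mine, chosen for illustration).

from __future__ import annotations

def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    def valid_connection(next_ver: int, curr_ind: int, path: list[int]) -> bool:
        if graph[path[curr_ind - 1]][next_ver] == 0:  # edge must exist
            return False
        return all(vertex != next_ver for vertex in path)  # no revisits

    def search(path: list[int], curr_ind: int) -> bool:
        if curr_ind == len(graph):
            return graph[path[curr_ind - 1]][path[0]] == 1  # close the cycle
        for next_ver in range(len(graph)):
            if valid_connection(next_ver, curr_ind, path):
                path[curr_ind] = next_ver
                if search(path, curr_ind + 1):
                    return True
                path[curr_ind] = -1  # backtrack
        return False

    path = [-1] * (len(graph) + 1)
    path[0] = path[-1] = start_index  # the cycle starts and ends at start_index
    return path if search(path, 1) else []

graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]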
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : dict ):
'''simple docstring'''
snake_case_ : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
snake_case_ : set[int] = set()
return any(
node not in visited and depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for node in graph )
def __lowerCAmelCase ( __UpperCamelCase : dict , __UpperCamelCase : int , __UpperCamelCase : set , __UpperCamelCase : set ):
'''simple docstring'''
visited.add(__UpperCamelCase )
rec_stk.add(__UpperCamelCase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(__UpperCamelCase )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58 |
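The same back-edge idea in a compact, runnable form, with two usage cases appended.

def has_cycle(graph: dict) -> bool:
    visited: set = set()
    rec_stack: set = set()  # vertices on the current recursion stack

    def dfs(vertex) -> bool:
        visited.add(vertex)
        rec_stack.add(vertex)
        for neighbour in graph[vertex]:
            if neighbour not in visited:
                if dfs(neighbour):
                    return True
            elif neighbour in rec_stack:  # back edge found -> cycle
                return True
        rec_stack.remove(vertex)
        return False

    return any(node not in visited and dfs(node) for node in graph)

print(has_cycle({0: [1], 1: [2], 2: [0]}))  # True: 0 -> 1 -> 2 -> 0
print(has_cycle({0: [1], 1: [2], 2: []}))   # False: a DAG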
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''BlipImageProcessor'''
_lowerCamelCase = '''AutoTokenizer'''
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__(_lowercase , _lowercase )
# add QFormer tokenizer
snake_case_ : List[str] = qformer_tokenizer
def __call__( self , _lowercase = None , _lowercase = None , _lowercase = True , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = True , _lowercase = None , **_lowercase , ) -> BatchFeature:
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
snake_case_ : Optional[Any] = BatchFeature()
if text is not None:
snake_case_ : List[str] = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
encoding.update(_lowercase )
snake_case_ : Union[str, Any] = self.qformer_tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
snake_case_ : List[str] = qformer_text_encoding.pop("""input_ids""" )
snake_case_ : Union[str, Any] = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
snake_case_ : Tuple = self.image_processor(_lowercase , return_tensors=_lowercase )
encoding.update(_lowercase )
return encoding
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer.model_input_names
snake_case_ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase__ ( self , _lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
if os.path.isfile(_lowercase ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(_lowercase , exist_ok=_lowercase )
snake_case_ : int = os.path.join(_lowercase , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(_lowercase )
return super().save_pretrained(_lowercase , **_lowercase )
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> int:
'''simple docstring'''
snake_case_ : List[str] = AutoTokenizer.from_pretrained(_lowercase , subfolder="""qformer_tokenizer""" )
snake_case_ : Union[str, Any] = cls._get_arguments_from_pretrained(_lowercase , **_lowercase )
args.append(_lowercase )
return cls(*_lowercase )
| 58 | 1 |
"""simple docstring"""
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = None
snake_case_ : List[Any] = None
snake_case_ : Tuple = graph
self._normalize_graph(_lowercase , _lowercase )
snake_case_ : Optional[Any] = len(_lowercase )
snake_case_ : List[str] = None
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
if isinstance(sources , int ):
snake_case_ : int = [sources]
if isinstance(sinks , int ):
snake_case_ : Any = [sinks]
if len(_lowercase ) == 0 or len(_lowercase ) == 0:
return
snake_case_ : Union[str, Any] = sources[0]
snake_case_ : int = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_lowercase ) > 1 or len(_lowercase ) > 1:
snake_case_ : Dict = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
snake_case_ : List[str] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
snake_case_ : List[str] = max_input_flow
snake_case_ : Tuple = 0
snake_case_ : List[str] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
snake_case_ : Tuple = max_input_flow
snake_case_ : Dict = size - 1
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = algorithm(self )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = flow_network
snake_case_ : str = flow_network.verticesCount
snake_case_ : Optional[int] = flow_network.sourceIndex
snake_case_ : Optional[Any] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
snake_case_ : int = flow_network.graph
snake_case_ : str = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
if not self.executed:
self._algorithm()
snake_case_ : Optional[Any] = True
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
# use this to save your result
snake_case_ : Any = -1
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : List[str] = [[0] * self.verticies_count for i in range(self.verticies_count )]
snake_case_ : List[str] = [0] * self.verticies_count
snake_case_ : List[Any] = [0] * self.verticies_count
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
snake_case_ : Any = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
snake_case_ : int = 0
while i < len(_lowercase ):
snake_case_ : Tuple = vertices_list[i]
snake_case_ : List[str] = self.heights[vertex_index]
self.process_vertex(_lowercase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_lowercase ) )
snake_case_ : Dict = 0
else:
i += 1
snake_case_ : Any = sum(self.preflow[self.source_index] )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_lowercase , _lowercase )
self.relabel(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : str = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
snake_case_ : str = self.heights[to_index]
if min_height is not None:
snake_case_ : List[str] = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : int = [0]
__lowerCAmelCase : List[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : str = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Dict = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[Any] = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 58 |
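The sample above is an obfuscated relabel-to-front push-relabel maximum-flow implementation. For reference, below is a minimal, readable sketch of the same algorithm; the names (`max_flow`, `height`, `excess`) are illustrative rather than taken from the sample, and the final assertion uses the 4-node graph from its `__main__` block, whose bottleneck path 0 -> 1 -> 2 -> 3 gives a maximum flow of 6.

from __future__ import annotations

def max_flow(capacity: list[list[int]], source: int, sink: int) -> int:
    """Minimal relabel-to-front push-relabel; `capacity` is an adjacency matrix."""
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    height = [0] * n
    excess = [0] * n
    height[source] = n                       # source starts at height n
    for v in range(n):                       # saturate every edge out of the source
        flow[source][v] = capacity[source][v]
        flow[v][source] = -capacity[source][v]
        excess[v] = capacity[source][v]
    active = [v for v in range(n) if v not in (source, sink)]
    i = 0
    while i < len(active):
        u = active[i]
        old_height = height[u]
        while excess[u] > 0:                 # discharge u completely
            pushed = False
            for v in range(n):
                # push along an admissible residual edge (u exactly one level above v)
                if capacity[u][v] - flow[u][v] > 0 and height[u] == height[v] + 1:
                    delta = min(excess[u], capacity[u][v] - flow[u][v])
                    flow[u][v] += delta
                    flow[v][u] -= delta
                    excess[u] -= delta
                    excess[v] += delta
                    pushed = True
                    if excess[u] == 0:
                        break
            if not pushed:                   # no admissible edge: relabel u
                height[u] = 1 + min(
                    height[v] for v in range(n) if capacity[u][v] - flow[u][v] > 0
                )
        if height[u] > old_height:           # relabeled: move u to the front, rescan
            active.insert(0, active.pop(i))
            i = 0
        else:
            i += 1
    return sum(flow[source])

assert max_flow([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6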
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase : List[Any] = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 | 1 |
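This file and the Swin one that follows both use transformers' lazy-import pattern: `_import_structure` names what the package exports, the `TYPE_CHECKING` branch gives static analyzers real imports, and at runtime `_LazyModule` defers the heavy submodule imports until an attribute is first accessed. A minimal sketch of the same idea built on PEP 562's module-level `__getattr__`, independent of the `_LazyModule` helper (the submodule and class names here are hypothetical):

# my_package/__init__.py -- minimal lazy-import sketch (PEP 562, Python 3.7+)
import importlib
from typing import TYPE_CHECKING

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],        # heavy: pulls in torch only when touched
}

if TYPE_CHECKING:                   # static type checkers see the real imports
    from .configuration import MyConfig
    from .modeling import MyModel
else:
    _attr_to_module = {
        attr: module for module, attrs in _import_structure.items() for attr in attrs
    }

    def __getattr__(name):          # called only for attributes not yet defined
        if name in _attr_to_module:
            module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
            value = getattr(module, name)
            globals()[name] = value  # cache so __getattr__ isn't hit again
            return value
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")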
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase : Tuple = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[str] = WavaVecaForSequenceClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : int = downstream_dict["""projector.weight"""]
snake_case_ : Optional[int] = downstream_dict["""projector.bias"""]
snake_case_ : List[Any] = downstream_dict["""model.post_net.linear.weight"""]
snake_case_ : Union[str, Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = WavaVecaForAudioFrameClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""model.linear.weight"""]
snake_case_ : int = downstream_dict["""model.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = WavaVecaForXVector.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""connector.weight"""]
snake_case_ : str = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case_ : Dict = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
snake_case_ : int = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
snake_case_ : str = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
snake_case_ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
snake_case_ : List[str] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Any = torch.load(__UpperCamelCase , map_location="""cpu""" )
snake_case_ : Any = checkpoint["""Downstream"""]
snake_case_ : Optional[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
__UpperCamelCase , return_attention_mask=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case_ : Optional[Any] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case_ : Tuple = convert_classification(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case_ : Union[str, Any] = convert_diarization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForXVector""" ):
snake_case_ : List[str] = convert_xvector(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
snake_case_ : List[Any] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__lowerCAmelCase : Dict = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 58 | 1 |
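Each `convert_*` helper above follows the same recipe: instantiate the target HF architecture from a config, then overwrite individual head parameters with tensors pulled out of the upstream checkpoint's `Downstream` state dict. A hedged sketch of that recipe — the `Classifier` module is a stand-in of my own, but the dictionary keys match the ones used above:

import torch
import torch.nn as nn

class Classifier(nn.Module):
    """Stand-in for the HF head being populated."""
    def __init__(self, hidden_size: int, num_labels: int):
        super().__init__()
        self.projector = nn.Linear(hidden_size, hidden_size)
        self.classifier = nn.Linear(hidden_size, num_labels)

def load_head(model: Classifier, downstream: dict) -> Classifier:
    # Copy checkpoint tensors into the freshly initialised parameters in place;
    # no_grad keeps autograd from tracking the copies.
    with torch.no_grad():
        model.projector.weight.copy_(downstream["projector.weight"])
        model.projector.bias.copy_(downstream["projector.bias"])
        model.classifier.weight.copy_(downstream["model.post_net.linear.weight"])
        model.classifier.bias.copy_(downstream["model.post_net.linear.bias"])
    return model

# e.g. downstream = torch.load("checkpoint.pt", map_location="cpu")["Downstream"]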
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __lowerCAmelCase ( __UpperCamelCase : str = "" ):
'''simple docstring'''
snake_case_ : Optional[int] = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
snake_case_ : Dict = BeautifulSoup(requests.get(__UpperCamelCase ).text , """html.parser""" )
snake_case_ : str = soup.find_all("""td""" , attrs="""titleColumn""" )
snake_case_ : Union[str, Any] = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(__UpperCamelCase , __UpperCamelCase )
}
def __lowerCAmelCase ( __UpperCamelCase : str = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
snake_case_ : int = get_imdb_top_aaa_movies()
with open(__UpperCamelCase , """w""" , newline="""""" ) as out_file:
snake_case_ : str = csv.writer(__UpperCamelCase )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 58 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : int = {'''vocab_file''': '''vocab.txt'''}
__lowerCAmelCase : Union[str, Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__lowerCAmelCase : Optional[Any] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__lowerCAmelCase : Any = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ConvBertTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase="[UNK]" , _lowercase="[SEP]" , _lowercase="[PAD]" , _lowercase="[CLS]" , _lowercase="[MASK]" , _lowercase=True , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
snake_case_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _lowercase ) != tokenize_chinese_chars
):
snake_case_ : Optional[int] = getattr(_lowercase , normalizer_state.pop("""type""" ) )
snake_case_ : Dict = do_lower_case
snake_case_ : str = strip_accents
snake_case_ : Optional[Any] = tokenize_chinese_chars
snake_case_ : int = normalizer_class(**_lowercase )
snake_case_ : Optional[int] = do_lower_case
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> int:
'''simple docstring'''
snake_case_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : int = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 58 | 1 |
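The `create_token_type_ids_from_sequences` override above builds BERT-style segment ids: zeros cover `[CLS] A [SEP]` and ones cover `B [SEP]`. A tiny worked illustration with invented token ids:

cls, sep = [101], [102]              # hypothetical special-token ids
token_ids_a = [7, 8, 9]              # sentence A
token_ids_b = [4, 5]                 # sentence B

# single sequence: [CLS] A [SEP] -> all zeros
single = len(cls + token_ids_a + sep) * [0]
assert single == [0, 0, 0, 0, 0]

# pair: [CLS] A [SEP] B [SEP] -> zeros for the first segment, ones for the second
pair = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
assert pair == [0, 0, 0, 0, 0, 1, 1, 1]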
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 58 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase = 1_2_8 , _lowercase = 2_5_6 , _lowercase = 2000.0 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 6_4 , _lowercase = 2_0_4_8 , _lowercase = 0.1 , ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = nn.Sequential(
nn.Linear(_lowercase , d_model * 4 , bias=_lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowercase ) , nn.SiLU() , )
snake_case_ : Any = nn.Embedding(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = False
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Union[str, Any] = nn.Dropout(p=_lowercase )
snake_case_ : Tuple = nn.ModuleList()
for lyr_num in range(_lowercase ):
# FiLM conditional T5 decoder
snake_case_ : Union[str, Any] = DecoderLayer(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
self.decoders.append(_lowercase )
snake_case_ : List[Any] = TaLayerNorm(_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(p=_lowercase )
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
snake_case_ : Optional[int] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
snake_case_ : int = self.conditioning_emb(_lowercase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
snake_case_ : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
snake_case_ : Dict = torch.broadcast_to(
torch.arange(_lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
snake_case_ : Tuple = self.position_encoding(_lowercase )
snake_case_ : Optional[Any] = self.continuous_inputs_projection(_lowercase )
inputs += position_encodings
snake_case_ : List[Any] = self.dropout(_lowercase )
# decoder: No padding present.
snake_case_ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
snake_case_ : int = [(x, self.encoder_decoder_mask(_lowercase , _lowercase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
snake_case_ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
snake_case_ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
snake_case_ : int = lyr(
_lowercase , conditioning_emb=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )[0]
snake_case_ : int = self.decoder_norm(_lowercase )
snake_case_ : Union[str, Any] = self.post_dropout(_lowercase )
snake_case_ : int = self.spec_out(_lowercase )
return spec_out
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=1E-6 ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case_ : Any = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase , ) )
# FiLM-conditioned MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase ) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.layer[0](
_lowercase , conditioning_emb=_lowercase , attention_mask=_lowercase , )
if encoder_hidden_states is not None:
snake_case_ : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
snake_case_ : str = self.layer[1](
_lowercase , key_value_states=_lowercase , attention_mask=_lowercase , )
# apply the FiLM-conditioned feed-forward layer
snake_case_ : Any = self.layer[-1](_lowercase , _lowercase )
return (hidden_states,)
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Any = TaLayerNorm(_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Union[str, Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : List[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : str = self.FiLMLayer(_lowercase , _lowercase )
# Self-attention block
snake_case_ : List[Any] = self.attention(_lowercase )
snake_case_ : List[str] = hidden_states + self.dropout(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : Union[str, Any] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
snake_case_ : Optional[Any] = self.attention(
_lowercase , encoder_hidden_states=_lowercase , attention_mask=attention_mask.squeeze(1 ) , )
snake_case_ : Any = hidden_states + self.dropout(_lowercase )
return layer_output
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = TaDenseGatedActDense(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Optional[int] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Tuple = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : Optional[int] = self.film(_lowercase , _lowercase )
snake_case_ : int = self.DenseReluDense(_lowercase )
snake_case_ : Optional[Any] = hidden_states + self.dropout(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : int = nn.Dropout(_lowercase )
snake_case_ : Optional[int] = NewGELUActivation()
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : str = self.act(self.wi_a(_lowercase ) )
snake_case_ : Dict = self.wi_a(_lowercase )
snake_case_ : Any = hidden_gelu * hidden_linear
snake_case_ : List[Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.wo(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1E-6 ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Union[str, Any] = nn.Parameter(torch.ones(_lowercase ) )
snake_case_ : int = eps
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_lowercase )
snake_case_ : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
snake_case_ : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase ) -> torch.Tensor:
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(_lowercase , 3.0 )) ))
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = nn.Linear(_lowercase , out_features * 2 , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.scale_bias(_lowercase )
snake_case_ , snake_case_ : Any = torch.chunk(_lowercase , 2 , -1 )
snake_case_ : Optional[Any] = x * (1 + scale) + shift
return x
| 58 | 1 |
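The `TaFiLMLayer` at the end of this sample implements feature-wise linear modulation: the conditioning embedding is projected to a per-channel scale and shift, applied as `x * (1 + scale) + shift`. A standalone sketch of that layer (class and argument names are mine):

import torch
import torch.nn as nn

class FiLM(nn.Module):
    """Feature-wise Linear Modulation: y = x * (1 + scale) + shift."""
    def __init__(self, cond_features: int, out_features: int):
        super().__init__()
        # one linear layer emits both the scale and the shift
        self.scale_bias = nn.Linear(cond_features, out_features * 2, bias=False)

    def forward(self, x: torch.Tensor, conditioning: torch.Tensor) -> torch.Tensor:
        emb = self.scale_bias(conditioning)
        scale, shift = torch.chunk(emb, 2, dim=-1)
        return x * (1 + scale) + shift

film = FiLM(cond_features=16, out_features=8)
x = torch.randn(2, 10, 8)            # (batch, seq, channels)
cond = torch.randn(2, 1, 16)         # broadcasts over the sequence dimension
assert film(x, cond).shape == (2, 10, 8)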
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__lowerCAmelCase : Any = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__lowerCAmelCase : Optional[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print('''\n'''.join(upper_files) + '''\n''')
__lowerCAmelCase : List[str] = [file for file in filepaths if ''' ''' in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print('''\n'''.join(space_files) + '''\n''')
__lowerCAmelCase : Union[str, Any] = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print('''\n'''.join(hyphen_files) + '''\n''')
__lowerCAmelCase : Any = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print('''\n'''.join(nodir_files) + '''\n''')
__lowerCAmelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 58 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''roformer'''
def __init__( self , _lowercase=5_0_0_0_0 , _lowercase=None , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_5_3_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=0 , _lowercase=False , _lowercase=True , **_lowercase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : str = vocab_size
snake_case_ : Any = hidden_size if embedding_size is None else embedding_size
snake_case_ : List[str] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : str = layer_norm_eps
snake_case_ : List[str] = rotary_value
snake_case_ : str = use_cache
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
snake_case_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 58 | 1 |
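The `OnnxConfig.inputs` property above maps each model input to its dynamic axes, i.e. the dimensions that may vary at inference time (batch, choice, sequence). Roughly how such a mapping is consumed by `torch.onnx.export`, sketched here outside the transformers export pipeline — the export call is left commented out because it needs a real model and example tensors:

import torch

# One axes dict per tensor name: {dim index: symbolic name}. Any dimension
# listed here stays symbolic in the exported graph instead of being frozen
# to the example input's size.
dynamic_axis = {0: "batch", 1: "sequence"}
dynamic_axes = {
    "input_ids": dynamic_axis,
    "attention_mask": dynamic_axis,
    "token_type_ids": dynamic_axis,
    "logits": dynamic_axis,
}

# torch.onnx.export(
#     model,
#     (input_ids, attention_mask, token_type_ids),
#     "roformer.onnx",
#     input_names=["input_ids", "attention_mask", "token_type_ids"],
#     output_names=["logits"],
#     dynamic_axes=dynamic_axes,
# )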
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 58 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Dict = checkpoints.load_tax_checkpoint(__UpperCamelCase )
snake_case_ : Tuple = flatten_dict(__UpperCamelCase )
return flax_params
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : List[Any] = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
snake_case_ : Optional[Any] = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
snake_case_ : List[Any] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
snake_case_ : List[str] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
snake_case_ : Optional[int] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
snake_case_ : Optional[Any] = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Union[str, Any] = new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
snake_case_ : int = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Dict = flax_dict[key]
snake_case_ : Tuple = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
snake_case_ : Optional[int] = torch.from_numpy(converted_dict[key].T )
else:
snake_case_ : List[Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[str]=False ):
'''simple docstring'''
snake_case_ : Optional[int] = get_flax_param(__UpperCamelCase )
if not use_large:
snake_case_ : Optional[int] = PixaStructVisionConfig()
snake_case_ : Optional[Any] = PixaStructTextConfig()
else:
snake_case_ : Tuple = PixaStructVisionConfig(
hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
snake_case_ : List[str] = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
snake_case_ : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCamelCase )
snake_case_ : Optional[int] = PixaStructForConditionalGeneration(__UpperCamelCase )
snake_case_ : str = rename_and_convert_flax_params(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
snake_case_ : int = PixaStructImageProcessor()
snake_case_ : str = PixaStructProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
if use_large:
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : int = True
# mkdir if needed
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
print("""Model saved in {}""".format(__UpperCamelCase ) )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 58 | 1 |
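The Pix2Struct converter above boils down to two steps: rename each flattened T5X key via substring mappings plus a regex for layer indices, then transpose the non-embedding weight matrices, since T5X stores linear kernels as (in, out) while `torch.nn.Linear` expects (out, in). A condensed sketch of the renaming step (the mapping here is a small excerpt, not the full table):

import re

MAPPING = {"kernel": "weight", "encoder_norm": "layernorm"}

def rename_key(key: str) -> str:
    for old, new in MAPPING.items():
        key = key.replace(old, new)
    # "layers_3" (T5X flat naming) -> "layer.3" (PyTorch ModuleList indexing)
    return re.sub(r"layers_(\d+)", r"layer.\1", key)

assert rename_key("encoder.layers_3.mlp.kernel") == "encoder.layer.3.mlp.weight"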
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : List[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCAmelCase : Tuple = 25_6047
__lowerCAmelCase : int = 25_6145
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = NllbTokenizer
_lowerCamelCase = NllbTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
_lowerCamelCase = {}
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : str = NllbTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = NllbTokenizer(_lowercase , keep_accents=_lowercase )
snake_case_ : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case_ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
snake_case_ : Optional[int] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
snake_case_ : Any = tempfile.mkdtemp()
snake_case_ : Optional[int] = tokenizer_r.save_pretrained(_lowercase )
snake_case_ : Optional[int] = tokenizer_p.save_pretrained(_lowercase )
# Check that it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case_ : Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
snake_case_ : Optional[int] = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : str = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=True
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : Optional[Any] = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
snake_case_ : Optional[int] = tokenizer_p.save_pretrained(_lowercase )
# Check that it saves the same files
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
snake_case_ : Union[str, Any] = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : str = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=False
snake_case_ : str = tempfile.mkdtemp()
snake_case_ : Any = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
snake_case_ : Optional[int] = tokenizer_p.save_pretrained(_lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ : List[str] = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : Optional[int] = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
snake_case_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
snake_case_ : Optional[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
snake_case_ : Optional[int] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
snake_case_ : Any = tokenizer.prepare_seqaseq_batch(
src_texts=_lowercase , tgt_texts=_lowercase , max_length=3 , max_target_length=1_0 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 1_0 )
# max_target_length will default to max_length if not specified
snake_case_ : str = tokenizer.prepare_seqaseq_batch(
_lowercase , tgt_texts=_lowercase , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
snake_case_ : Optional[int] = tokenizer.prepare_seqaseq_batch(
src_texts=_lowercase , max_length=3 , max_target_length=1_0 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , _lowercase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : List[str] = [AddedToken("""<special>""" , lstrip=_lowercase )]
snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , **_lowercase )
snake_case_ : Dict = tokenizer_r.encode("""Hey this is a <special> token""" )
snake_case_ : int = tokenizer_r.encode("""<special>""" , add_special_tokens=_lowercase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , **_lowercase )
snake_case_ : Tuple = tokenizer_p.encode("""Hey this is a <special> token""" )
snake_case_ : int = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = '''facebook/nllb-200-distilled-600M'''
_lowerCamelCase = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
_lowerCamelCase = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
_lowerCamelCase = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
@classmethod
def UpperCAmelCase__ ( cls ) -> List[str]:
'''simple docstring'''
snake_case_ : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
snake_case_ : Dict = 1
return cls
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 2_5_6_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 2_5_6_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 2_5_6_0_5_7 )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self.assertIn(_lowercase , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : List[Any] = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
# fmt: on
snake_case_ : int = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
snake_case_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertNotIn(self.tokenizer.eos_token , _lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , _lowercase )
snake_case_ : Optional[int] = 1_0
snake_case_ : Optional[int] = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _lowercase )
self.assertEqual(len(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_6_2_0_3, 3] )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = tempfile.mkdtemp()
snake_case_ : List[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowercase )
snake_case_ : Union[str, Any] = NllbTokenizer.from_pretrained(_lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase )
@require_torch
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case_ : Optional[Any] = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 1_5) , batch.input_ids.shape )
self.assertEqual((2, 1_5) , batch.attention_mask.shape )
snake_case_ : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
self.assertEqual(_lowercase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors="""pt""" )
snake_case_ : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=1_0 , return_tensors="""pt""" )
snake_case_ : str = targets["""input_ids"""]
snake_case_ : Optional[int] = shift_tokens_right(
_lowercase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(_lowercase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_6_0_5_7,
} , )
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = True
snake_case_ : Optional[int] = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] )
snake_case_ : str = False
snake_case_ : List[Any] = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
| 58 |
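Several tests above call `shift_tokens_right` to turn padded labels into decoder inputs for teacher forcing: prepend the decoder start token (here a target-language code), shift everything right by one, and replace the -100 loss-masking value with the pad id. A minimal sketch of that transformation (the example ids are invented):

import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int,
                       decoder_start_token_id: int) -> torch.Tensor:
    """Shift label ids one position right to build decoder inputs."""
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    # labels use -100 to mask the loss; the model must never see it as an id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[5, 2, -100, -100]])      # "... </s>" (2 = eos), then masking
dec_in = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=250)
assert dec_in.tolist() == [[250, 5, 2, 1]]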
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__UpperCamelCase ) * abs(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 58 | 1 |
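The sample above computes translational kinetic energy, E_k = (1/2) m v^2, taking the absolute value of the velocity twice so that abs(v) * abs(v) == v**2 and the sign of the velocity never matters. A quick check of the arithmetic:

# 0.5 * 10 kg * (5 m/s)^2 = 125 J, whichever direction the body moves
assert 0.5 * 10 * abs(-5) * abs(-5) == 125.0
assert 0.5 * 10 * abs(5) * abs(5) == 125.0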
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__lowerCAmelCase : Optional[Any] = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class _lowerCAmelCase ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , _lowercase = " " ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = sentence_delimiter
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
return list(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = []
for sent_idx, sentence in enumerate(_lowercase ):
chars.extend(self.process_string(_lowercase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(_lowercase ) - 1:
chars.append(self.sentence_delimiter )
return chars
__lowerCAmelCase : Union[str, Any] = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__lowerCAmelCase : Optional[int] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__lowerCAmelCase : int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCAmelCase : List[str] = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
__lowerCAmelCase : Dict = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> Optional[Any]:
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
_lowercase , _lowercase , truth_transform=_lowercase , hypothesis_transform=_lowercase , )["wer"]
snake_case_ : Optional[Any] = 0
snake_case_ : Optional[Any] = 0
for prediction, reference in zip(_lowercase , _lowercase ):
snake_case_ : Optional[int] = jiwer.compute_measures(
_lowercase , _lowercase , truth_transform=_lowercase , hypothesis_transform=_lowercase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 58 |
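The docstrings above define CER = (S + D + I) / N, which is just character-level Levenshtein distance normalized by the reference length. A self-contained sketch (plain single-row DP, no jiwer) with a hand-checkable case; it mirrors the docstring's formula, not jiwer's internals:

def cer(reference: str, prediction: str) -> float:
    """Character error rate = levenshtein(reference, prediction) / len(reference)."""
    m, n = len(reference), len(prediction)
    dp = list(range(n + 1))          # dp[j] = distance(reference[:i], prediction[:j])
    for i in range(1, m + 1):
        prev, dp[0] = dp[0], i
        for j in range(1, n + 1):
            cur = dp[j]
            dp[j] = min(
                dp[j] + 1,                                       # deletion
                dp[j - 1] + 1,                                   # insertion
                prev + (reference[i - 1] != prediction[j - 1]),  # substitution
            )
            prev = cur
    return dp[n] / m

# one substitution over three reference characters -> 1/3
assert abs(cer("abc", "axc") - 1 / 3) < 1e-9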
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionInpaintPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase = frozenset([] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
snake_case_ : Dict = PNDMScheduler(skip_prk_steps=_lowercase )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case_ : Dict = CLIPTextModel(_lowercase )
snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowercase ) ).to(_lowercase )
snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Any = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : str = torch.manual_seed(_lowercase )
else:
snake_case_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline(**_lowercase )
snake_case_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = self.get_dummy_inputs(_lowercase )
snake_case_ : List[str] = sd_pipe(**_lowercase ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Optional[int] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Dict = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , torch_dtype=torch.floataa , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : List[str] = PNDMScheduler.from_pretrained(_lowercase , subfolder="""scheduler""" )
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , scheduler=_lowercase , torch_dtype=torch.floataa , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : Any = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
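# For reference, the ceiling asserted above in binary units: 2.65 * 10**9
# bytes is roughly 2.47 GiB (plain arithmetic, not part of the test itself).
print(2.65 * 10**9 / 2**30)  # ~2.468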
| 58 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''pixel_values''']
def __init__( self , _lowercase = True , _lowercase = None , _lowercase = PILImageResampling.BICUBIC , _lowercase = True , _lowercase = None , _lowercase = True , _lowercase = 1 / 2_5_5 , _lowercase = True , _lowercase = IMAGENET_DEFAULT_MEAN , _lowercase = IMAGENET_DEFAULT_STD , **_lowercase , ) -> None:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_2_4}
snake_case_ : Tuple = get_size_dict(_lowercase , default_to_square=_lowercase )
snake_case_ : Tuple = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case_ : Optional[int] = get_size_dict(_lowercase , param_name="""crop_size""" )
snake_case_ : Union[str, Any] = do_resize
snake_case_ : List[Any] = size
snake_case_ : Optional[Any] = resample
snake_case_ : Tuple = do_center_crop
snake_case_ : Optional[Any] = crop_size
snake_case_ : List[str] = do_rescale
snake_case_ : int = rescale_factor
snake_case_ : Tuple = do_normalize
snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = PILImageResampling.BICUBIC , _lowercase = None , **_lowercase , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : List[Any] = get_size_dict(_lowercase , default_to_square=_lowercase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
snake_case_ : Optional[Any] = int((2_5_6 / 2_2_4) * size["""shortest_edge"""] )
snake_case_ : Union[str, Any] = get_resize_output_image_size(_lowercase , size=_lowercase , default_to_square=_lowercase )
snake_case_ : Tuple = {"""height""": output_size[0], """width""": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
_lowercase , size=(size_dict["""height"""], size_dict["""width"""]) , resample=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : str = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(_lowercase , size=(size["""height"""], size["""width"""]) , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray:
'''simple docstring'''
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase = None , **_lowercase , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> BatchFeature:
'''simple docstring'''
snake_case_ : List[str] = do_resize if do_resize is not None else self.do_resize
snake_case_ : str = resample if resample is not None else self.resample
snake_case_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
snake_case_ : Any = image_std if image_std is not None else self.image_std
snake_case_ : Optional[int] = size if size is not None else self.size
snake_case_ : List[str] = get_size_dict(_lowercase , default_to_square=_lowercase )
snake_case_ : Any = crop_size if crop_size is not None else self.crop_size
snake_case_ : Any = get_size_dict(_lowercase , param_name="""crop_size""" )
snake_case_ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : List[str] = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
snake_case_ : Any = [self.resize(_lowercase , _lowercase , _lowercase ) for image in images]
if do_center_crop:
snake_case_ : Dict = [self.center_crop(_lowercase , _lowercase ) for image in images]
if do_rescale:
snake_case_ : str = [self.rescale(_lowercase , _lowercase ) for image in images]
if do_normalize:
snake_case_ : List[str] = [self.normalize(_lowercase , _lowercase , _lowercase ) for image in images]
snake_case_ : List[str] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
snake_case_ : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
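# A hedged sketch of the transform order applied above (resize -> center
# crop -> rescale -> normalize), written with plain NumPy on a fake image;
# the crop box, scale factor and mean/std values here are illustrative
# assumptions, not the processor's actual defaults.
import numpy as np
image = np.random.randint(0, 256, size=(256, 256, 3)).astype(np.float32)
image = image[16:240, 16:240, :]  # stand-in for a 224x224 center crop
image = image * (1 / 255)  # rescale to [0, 1]
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = (image - mean) / std  # per-channel normalization
print(image.shape)  # (224, 224, 3)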
| 58 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
snake_case_ : Optional[int] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case_ : Optional[Any] = F'{src_lang}-{tgt_lang}'
snake_case_ : Dict = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case_ : List[str] = os.path.join(__UpperCamelCase , """README.md""" )
print(F'Generating {path}' )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__lowerCAmelCase : str = Path(__file__).resolve().parent.parent.parent
__lowerCAmelCase : Optional[int] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = model_name.split('''-''')
__lowerCAmelCase : Optional[int] = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58 | 1 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Dict = checkpoints.load_tax_checkpoint(__UpperCamelCase )
snake_case_ : Tuple = flatten_dict(__UpperCamelCase )
return flax_params
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : List[Any] = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
snake_case_ : Optional[Any] = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
snake_case_ : List[Any] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
snake_case_ : List[str] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
snake_case_ : Optional[int] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
snake_case_ : Optional[Any] = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Union[str, Any] = new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
snake_case_ : int = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Dict = flax_dict[key]
snake_case_ : Tuple = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
snake_case_ : Optional[int] = torch.from_numpy(converted_dict[key].T )
else:
snake_case_ : List[Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
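# A quick spot check of the regex rename used above ("layers_(\d+)" becomes
# "layer.\1"), applied to a hypothetical key name:
import re
key = "encoder.layers_3.mlp.wi.weight"
print(re.sub(r"layers_(\d+)", r"layer.\1", key))  # encoder.layer.3.mlp.wi.weight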
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[str]=False ):
'''simple docstring'''
snake_case_ : Optional[int] = get_flax_param(__UpperCamelCase )
if not use_large:
snake_case_ : Optional[int] = PixaStructVisionConfig()
snake_case_ : Optional[Any] = PixaStructTextConfig()
else:
snake_case_ : Tuple = PixaStructVisionConfig(
hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
snake_case_ : List[str] = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
snake_case_ : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCamelCase )
snake_case_ : Optional[int] = PixaStructForConditionalGeneration(__UpperCamelCase )
snake_case_ : str = rename_and_convert_flax_params(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
snake_case_ : int = PixaStructImageProcessor()
snake_case_ : str = PixaStructProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
if use_large:
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : int = True
# mkdir if needed
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
print("""Model saved in {}""".format(__UpperCamelCase ) )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Convert a VQA checkpoint.''')
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 58 |
"""simple docstring"""
__lowerCAmelCase : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following command.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCAmelCase : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCAmelCase : Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase : Union[str, Any] = '''docs/source/en/_toctree.yml'''
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = defaultdict(__UpperCamelCase )
snake_case_ : str = []
snake_case_ : Optional[int] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(__UpperCamelCase )
snake_case_ : str = new_doc_list
snake_case_ : str = [key for key, value in counts.items() if value > 1]
snake_case_ : str = []
for duplicate_key in duplicates:
snake_case_ : Union[str, Any] = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(__UpperCamelCase ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
snake_case_ : List[Any] = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__UpperCamelCase ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(__UpperCamelCase )
# Sort
return overview_doc
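# A tiny illustration of the dedupe-then-sort behaviour implemented above,
# run on an invented toc fragment (not the real diffusers toctree):
docs = [
    {"local": "overview", "title": "Overview"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddpm", "title": "DDPM"},
]
seen = {}
for d in docs:
    seen.setdefault(d["local"], d)  # keep one entry per "local" key
ordered = sorted(seen.values(), key=lambda d: d["title"].lower())
ordered = [d for d in ordered if d["local"] == "overview"] + [d for d in ordered if d["local"] != "overview"]
print([d["local"] for d in ordered])  # ['overview', 'ddim', 'ddpm']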
def __lowerCAmelCase ( __UpperCamelCase : Dict=False ):
'''simple docstring'''
with open(__UpperCamelCase , encoding="""utf-8""" ) as f:
snake_case_ : int = yaml.safe_load(f.read() )
# Get to the API doc
snake_case_ : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case_ : int = content[api_idx]["""sections"""]
# Then to the model doc
snake_case_ : Optional[int] = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
snake_case_ : List[str] = api_doc[scheduler_idx]["""sections"""]
snake_case_ : Dict = clean_doc_toc(__UpperCamelCase )
snake_case_ : Optional[int] = False
if new_scheduler_doc != scheduler_doc:
snake_case_ : Any = True
if overwrite:
snake_case_ : str = new_scheduler_doc
if diff:
if overwrite:
snake_case_ : Union[str, Any] = api_doc
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any]=False ):
'''simple docstring'''
with open(__UpperCamelCase , encoding="""utf-8""" ) as f:
snake_case_ : Optional[int] = yaml.safe_load(f.read() )
# Get to the API doc
snake_case_ : Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case_ : Dict = content[api_idx]["""sections"""]
# Then to the model doc
snake_case_ : Optional[int] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
snake_case_ : Any = False
snake_case_ : int = api_doc[pipeline_idx]["""sections"""]
snake_case_ : int = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
snake_case_ : str = pipeline_doc["""section"""]
snake_case_ : Dict = clean_doc_toc(__UpperCamelCase )
if overwrite:
snake_case_ : int = new_sub_pipeline_doc
new_pipeline_docs.append(__UpperCamelCase )
# sort overall pipeline doc
snake_case_ : str = clean_doc_toc(__UpperCamelCase )
if new_pipeline_docs != pipeline_docs:
snake_case_ : Optional[Any] = True
if overwrite:
snake_case_ : Dict = new_pipeline_docs
if diff:
if overwrite:
snake_case_ : Optional[Any] = api_doc
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__lowerCAmelCase : Any = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 58 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
__lowerCAmelCase : Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCAmelCase : Union[str, Any] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. This issue is often examined through a power law that posits a correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
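# A worked instance of the formula above, WER = (S + D + I) / (S + D + C):
# reference "the cat sat" vs. hypothesis "the cat sat down" gives
# S=0, D=0, I=1, C=3, so WER = 1 / 3.
S, D, I, C = 0, 0, 1, 3
print((S + D + I) / (S + D + C))  # 0.333...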
__lowerCAmelCase : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=False ) -> Optional[Any]:
'''simple docstring'''
if concatenate_texts:
return compute_measures(_lowercase , _lowercase )["wer"]
else:
snake_case_ : List[str] = 0
snake_case_ : Optional[int] = 0
for prediction, reference in zip(_lowercase , _lowercase ):
snake_case_ : Optional[Any] = compute_measures(_lowercase , _lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 58 | 1 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : str = "cpu" , __UpperCamelCase : Union[str, None] = None ):
'''simple docstring'''
snake_case_ : str = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__UpperCamelCase , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
snake_case_ : Tuple = v.half()
if save_path is None: # overwrite src_path
snake_case_ : List[str] = src_path
torch.save(__UpperCamelCase , __UpperCamelCase )
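# The core halving step above, illustrated on a toy in-memory state dict
# (no file I/O involved):
import torch
toy_state = {"w": torch.ones(2, 2), "b": torch.zeros(2)}
toy_state = {k: v.half() for k, v in toy_state.items()}
print(toy_state["w"].dtype)  # torch.float16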
if __name__ == "__main__":
fire.Fire(convert)
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=3 , _lowercase=2_2_4 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Union[str, Any] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : Optional[Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Union[str, Any] = do_resize
snake_case_ : Optional[int] = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : Dict = image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 58 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : bytes ):
'''simple docstring'''
return "".join([hex(__UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(__UpperCamelCase )] )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
if (len(__UpperCamelCase ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(__UpperCamelCase ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(__UpperCamelCase ) , 2 ) )
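# A worked round trip through the two helpers above, spelled out inline:
# each input byte becomes two hex digits, so b"HELLO" (5 bytes) encodes to
# 10 characters.
encoded = "".join([hex(byte)[2:].zfill(2).upper() for byte in b"HELLO"])
print(encoded)  # 48454C4C4F
decoded = bytes(int(encoded[i] + encoded[i + 1], 16) for i in range(0, len(encoded), 2))
print(decoded)  # b'HELLO'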
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
snake_case_ : List[Any] = max((len(str(_lowercase ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
snake_case_ : str = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Optional[Any] = node.forward[0]
snake_case_ : int = node.value
if len(__UpperCamelCase ) != 4:
print()
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = SkipList()
assert skip_list.find("""Some key""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(i , i )
assert is_sorted(list(skip_list ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(skip_list ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(skip_list ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
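# A standalone sketch of the geometric level draw behind random_level():
# with p = 0.5 a node reaches level k with probability (1 - p) * p**(k - 1),
# so about half of all nodes stay at level 1. (Self-contained re-statement,
# not a call into the class above.)
import random
from collections import Counter

def draw_level(p=0.5, max_level=16):
    level = 1
    while random.random() < p and level < max_level:
        level += 1
    return level

random.seed(0)
counts = Counter(draw_level() for _ in range(10_000))
print(counts[1] / 10_000)  # close to 0.5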
| 58 | 1 |
"""simple docstring"""
from PIL import Image
def __lowerCAmelCase ( __UpperCamelCase : Image , __UpperCamelCase : float ):
'''simple docstring'''
def brightness(__UpperCamelCase : int ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(__UpperCamelCase )
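# Hand-computed effect of the brightness formula above with level = 100:
# a channel value of 50 maps to 150 and 200 maps to 300 (PIL clips stored
# channel values into the valid 0-255 range).
level = 100
for c in (50, 200):
    print(128 + level + (c - 128))  # 150, then 300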
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__lowerCAmelCase : Tuple = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[Any] = '''examples/'''
__lowerCAmelCase : Union[str, Any] = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__lowerCAmelCase : Union[str, Any] = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__lowerCAmelCase : List[Any] = '''README.md'''
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : Any = f.read()
snake_case_ , snake_case_ : Optional[int] = REPLACE_PATTERNS[pattern]
snake_case_ : Union[str, Any] = replace.replace("""VERSION""" , __UpperCamelCase )
snake_case_ : List[Any] = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
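# A quick demo of one pattern/replacement pair from REPLACE_PATTERNS above
# (the "init" entry), applied to a made-up version line:
import re
pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
replacement = '__version__ = "VERSION"\n'.replace("VERSION", "0.18.0")
print(pattern.sub(replacement, '__version__ = "0.17.0.dev0"\n'))  # __version__ = "0.18.0"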
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="""examples""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : int=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = """🤗 Transformers currently provides the following architectures"""
snake_case_ : Union[str, Any] = """1. Want to contribute a new model?"""
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : str = f.readlines()
# Find the start of the list.
snake_case_ : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case_ : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
snake_case_ : Any = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
snake_case_ : Any = f.read()
snake_case_ : Tuple = REPLACE_PATTERNS["""init"""][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str=False ):
'''simple docstring'''
snake_case_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
snake_case_ : str = default_version.base_version
elif patch:
snake_case_ : str = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
snake_case_ : str = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
snake_case_ : int = input(F'Which version are you releasing? [{default_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Optional[int] = default_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = get_version()
snake_case_ : str = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
snake_case_ : Tuple = current_version.base_version
# Check with the user we got that right.
snake_case_ : Optional[int] = input(F'Which version are we developing now? [{dev_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Dict = dev_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__lowerCAmelCase : str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 58 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0_0_0 ):
'''simple docstring'''
snake_case_ : List[str] = 2**power
snake_case_ : List[Any] = 0
while n:
snake_case_ , snake_case_ : str = r + n % 1_0, n // 1_0
return r
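# Sanity check of the digit-sum loop above on a small power: 2**15 = 32768,
# and 3 + 2 + 7 + 6 + 8 = 26.
n, r = 2**15, 0
while n:
    r, n = r + n % 10, n // 10
print(r)  # 26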
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
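# One worked evaluation of c = sqrt(bulk_modulus / density), using rough
# textbook-style figures for water (the exact constants are assumptions):
density, bulk_modulus = 1000.0, 2.15e9
print((bulk_modulus / density) ** 0.5)  # ~1466 m/s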
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[int] = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 |
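The init above is the standard lazy-import layout: names are declared in _import_structure and only resolved on first attribute access via _LazyModule. A minimal self-contained sketch of the same deferral idea, using the stdlib json module as the lazily loaded target:

# Minimal sketch of the lazy-import idea behind _LazyModule above.
import importlib

class LazyModule:
    def __init__(self, name: str, import_structure: dict):
        self._name = name
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # The real import only happens here, on first access.
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy_json = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy_json.dumps({"lazy": True}))  # json is imported only at this call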
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
snake_case_ : str = precision
snake_case_ : Any = ceil(precision / 1_4 )
snake_case_ : Dict = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
snake_case_ : Optional[Any] = 1
snake_case_ : List[str] = 1_3_5_9_1_4_0_9
snake_case_ : Optional[int] = Decimal(__UpperCamelCase )
for k in range(1 , __UpperCamelCase ):
snake_case_ : Any = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCamelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__lowerCAmelCase : int = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 58 | 1 |
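With the obfuscated names expanded, the pi routine above is the Chudnovsky series, where each term contributes roughly 14 more correct digits. A readable sketch of the same computation:

# Readable sketch of the Chudnovsky series used above.
from decimal import Decimal, getcontext
from math import ceil, factorial

def chudnovsky_pi(precision: int) -> str:
    getcontext().prec = precision
    num_terms = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_terms):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]

print(chudnovsky_pi(50))  # 3.1415926535...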
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[str] = WavaVecaForSequenceClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : int = downstream_dict["""projector.weight"""]
snake_case_ : Optional[int] = downstream_dict["""projector.bias"""]
snake_case_ : List[Any] = downstream_dict["""model.post_net.linear.weight"""]
snake_case_ : Union[str, Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = WavaVecaForAudioFrameClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""model.linear.weight"""]
snake_case_ : int = downstream_dict["""model.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = WavaVecaForXVector.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""connector.weight"""]
snake_case_ : str = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case_ : Dict = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
snake_case_ : int = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
snake_case_ : str = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
snake_case_ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
snake_case_ : List[str] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Any = torch.load(__UpperCamelCase , map_location="""cpu""" )
snake_case_ : Any = checkpoint["""Downstream"""]
snake_case_ : Optional[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
__UpperCamelCase , return_attention_mask=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case_ : Optional[Any] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case_ : Tuple = convert_classification(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case_ : Union[str, Any] = convert_diarization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForXVector""" ):
snake_case_ : List[str] = convert_xvector(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
snake_case_ : List[Any] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__lowerCAmelCase : Dict = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 58 |
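The converter above picks a weight-mapping routine based on the suffix of hf_config.architectures[0]. A minimal sketch of that dispatch; the returned strings merely label the obfuscated helpers and are assumptions here:

# Sketch of the architecture-suffix dispatch used by the converter above.
def pick_converter(architecture: str) -> str:
    if architecture.endswith("ForSequenceClassification"):
        return "convert_classification"
    if architecture.endswith("ForAudioFrameClassification"):
        return "convert_diarization"
    if architecture.endswith("ForXVector"):
        return "convert_xvector"
    raise NotImplementedError(f"S3PRL weights conversion is not supported for {architecture}")

assert pick_converter("Wav2Vec2ForXVector") == "convert_xvector"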
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Any = torch.exp(__UpperCamelCase )
snake_case_ : Optional[int] = torch.sum(__UpperCamelCase , dim=1 ) # sum of exp(x_i)
snake_case_ : str = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(__UpperCamelCase ) - B / A
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = config.output_attentions
snake_case_ : str = config.output_hidden_states
snake_case_ : List[str] = nn.ModuleList([BertLayer(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Tuple = nn.ModuleList([BertHighway(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
if (type(_lowercase ) is float) or (type(_lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ : Dict = x
else:
snake_case_ : Union[str, Any] = x
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Any:
'''simple docstring'''
snake_case_ : str = ()
snake_case_ : str = ()
snake_case_ : List[str] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ : int = all_hidden_states + (hidden_states,)
snake_case_ : Any = layer_module(
_lowercase , _lowercase , head_mask[i] , _lowercase , _lowercase )
snake_case_ : Dict = layer_outputs[0]
if self.output_attentions:
snake_case_ : str = all_attentions + (layer_outputs[1],)
snake_case_ : Optional[int] = (hidden_states,)
if self.output_hidden_states:
snake_case_ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : int = current_outputs + (all_attentions,)
snake_case_ : Optional[Any] = self.highway[i](_lowercase )
# logits, pooled_output
if not self.training:
snake_case_ : Tuple = highway_exit[0]
snake_case_ : List[str] = entropy(_lowercase )
snake_case_ : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_lowercase , i + 1 )
else:
snake_case_ : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ : Dict = all_hidden_states + (hidden_states,)
snake_case_ : str = (hidden_states,)
if self.output_hidden_states:
snake_case_ : List[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Union[str, Any] = outputs + (all_attentions,)
snake_case_ : List[str] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config
snake_case_ : int = BertEmbeddings(_lowercase )
snake_case_ : Tuple = DeeBertEncoder(_lowercase )
snake_case_ : int = BertPooler(_lowercase )
self.init_weights()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = value
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_lowercase )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
snake_case_ : Dict = input_ids.size()
elif inputs_embeds is not None:
snake_case_ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
snake_case_ : int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ : Dict = torch.ones(_lowercase , device=_lowercase )
if encoder_attention_mask is None:
snake_case_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
snake_case_ : Any = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ : Any = encoder_attention_mask[:, None, None, :]
snake_case_ : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ : List[str] = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ : int = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
snake_case_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
snake_case_ : List[str] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
snake_case_ : Optional[Any] = encoder_outputs[0]
snake_case_ : Union[str, Any] = self.pooler(_lowercase )
snake_case_ : Optional[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = message
snake_case_ : str = exit_layer # start from 1!
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : str = BertPooler(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Dict = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = encoder_outputs[0]
snake_case_ : List[Any] = self.pooler(_lowercase )
# "return" pooler_output
# BertModel
snake_case_ : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ : Union[str, Any] = bmodel_output[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : List[str] = self.classifier(_lowercase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : Tuple = config.num_hidden_layers
snake_case_ : Any = DeeBertModel(_lowercase )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> int:
'''simple docstring'''
snake_case_ : int = self.num_layers
try:
snake_case_ : Any = self.bert(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ : str = outputs[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Optional[int] = e.message
snake_case_ : Dict = e.exit_layer
snake_case_ : Optional[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(_lowercase )
snake_case_ : int = []
snake_case_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : List[Any] = MSELoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : str = (loss,) + outputs
if not self.training:
snake_case_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 58 | 1 |
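The early-exit rule above hinges on the entropy helper: it computes the Shannon entropy (natural log) of softmax(x) as log(sum of e^x_i) minus (sum of x_i * e^x_i) / (sum of e^x_i), and a highway classifier exits when its entropy drops below the layer's threshold. A small numeric sketch of that test:

# Numeric sketch of the entropy-based early-exit criterion used by DeeBERT above.
import torch

def entropy(x: torch.Tensor) -> torch.Tensor:
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)       # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)   # sum of x_i * exp(x_i)
    return torch.log(A) - B / A

confident = torch.tensor([[8.0, -8.0]])  # near one-hot logits -> entropy close to 0
uncertain = torch.tensor([[0.0, 0.0]])   # uniform logits -> entropy = log(2)
assert entropy(confident) < 0.01
assert torch.isclose(entropy(uncertain), torch.log(torch.tensor(2.0)))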
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : bool = False ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[Any] = F'Expected string as input, found {type(__UpperCamelCase )}'
raise ValueError(__UpperCamelCase )
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[Any] = F'Expected boolean as use_pascal parameter, found {type(__UpperCamelCase )}'
raise ValueError(__UpperCamelCase )
snake_case_ : Any = input_str.split("""_""" )
snake_case_ : Dict = 0 if use_pascal else 1
snake_case_ : Optional[int] = words[start_index:]
snake_case_ : Optional[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
snake_case_ : List[Any] = """""" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58 |
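A de-obfuscated usage sketch for the case converter above; the example strings are illustrative:

# Illustrative usage of the snake_case converter above.
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    capitalized = [word[0].upper() + word[1:] for word in words[start_index:]]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized])

assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"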
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ):
'''simple docstring'''
return (-y * np.log(__UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[int] = np.dot(__UpperCamelCase , __UpperCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(__UpperCamelCase ) ) )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int=7_0_0_0_0 ):
'''simple docstring'''
snake_case_ : Dict = np.zeros(x.shape[1] )
for iterations in range(__UpperCamelCase ):
snake_case_ : Any = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Optional[Any] = np.dot(x.T , h - y ) / y.size
snake_case_ : str = theta - alpha * gradient # updating the weights
snake_case_ : int = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Dict = cost_function(__UpperCamelCase , __UpperCamelCase )
if iterations % 1_0_0 == 0:
print(F'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
__lowerCAmelCase : Any = datasets.load_iris()
__lowerCAmelCase : List[Any] = iris.data[:, :2]
__lowerCAmelCase : Tuple = (iris.target != 0) * 1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : List[Any] = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
return sigmoid_function(
np.dot(__UpperCamelCase , __UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Tuple = (x[:, 1].min(), x[:, 1].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Optional[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
__lowerCAmelCase : Any = np.c_[xxa.ravel(), xxb.ravel()]
__lowerCAmelCase : Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 58 | 1 |
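The training loop above is plain batch gradient descent on the logistic loss, with gradient X^T (sigmoid(X theta) - y) / m. A compact sketch of one update step:

# Compact sketch of one gradient-descent step from the logistic regression above.
import numpy as np

def sigmoid(z: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-z))

def gradient_step(theta: np.ndarray, x: np.ndarray, y: np.ndarray, alpha: float) -> np.ndarray:
    h = sigmoid(x @ theta)              # predicted probabilities
    gradient = x.T @ (h - y) / y.size   # d(loss)/d(theta)
    return theta - alpha * gradient     # updated weights

x = np.array([[1.0, 2.0], [1.0, -1.0]])
y = np.array([1.0, 0.0])
theta = gradient_step(np.zeros(2), x, y, alpha=0.1)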
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(*_lowercase , **_lowercase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None ) -> int:
'''simple docstring'''
snake_case_ : Any = {}
snake_case_ : Optional[int] = {}
if prompt is not None:
snake_case_ : Tuple = prompt
if generate_kwargs is not None:
snake_case_ : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
snake_case_ : Tuple = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
snake_case_ : Optional[int] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowercase , **_lowercase ) -> str:
'''simple docstring'''
return super().__call__(_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = load_image(_lowercase )
if prompt is not None:
if not isinstance(_lowercase , _lowercase ):
raise ValueError(
f'Received an invalid text input, got - {type(_lowercase )} - but expected a single string. '
"""Note also that one single text can be provided for conditional image to text generation.""" )
snake_case_ : Dict = self.model.config.model_type
if model_type == "git":
snake_case_ : Tuple = self.image_processor(images=_lowercase , return_tensors=self.framework )
snake_case_ : Optional[int] = self.tokenizer(text=_lowercase , add_special_tokens=_lowercase ).input_ids
snake_case_ : List[str] = [self.tokenizer.cls_token_id] + input_ids
snake_case_ : Union[str, Any] = torch.tensor(_lowercase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
snake_case_ : int = self.image_processor(images=_lowercase , header_text=_lowercase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
snake_case_ : Any = self.image_processor(images=_lowercase , return_tensors=self.framework )
snake_case_ : List[Any] = self.tokenizer(_lowercase , return_tensors=self.framework )
model_inputs.update(_lowercase )
else:
raise ValueError(f'Model type {model_type} does not support conditional text generation' )
else:
snake_case_ : List[str] = self.image_processor(images=_lowercase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
snake_case_ : Tuple = None
return model_inputs
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> List[str]:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowercase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
snake_case_ : Optional[Any] = None
if generate_kwargs is None:
snake_case_ : List[Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
snake_case_ : Dict = model_inputs.pop(self.model.main_input_name )
snake_case_ : Optional[Any] = self.model.generate(_lowercase , **_lowercase , **_lowercase )
return model_outputs
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
for output_ids in model_outputs:
snake_case_ : Dict = {
"""generated_text""": self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , )
}
records.append(_lowercase )
return records
| 58 |
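A typical use of the pipeline class above goes through transformers.pipeline; the checkpoint name and image URL below are assumed examples for illustration and require a network download:

# Hedged usage sketch for the image-to-text pipeline above.
from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# e.g. [{'generated_text': 'two birds are standing next to each other'}]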
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCAmelCase : Tuple = '''scheduler_config.json'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['''dtype''']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase = None , _lowercase = None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.load_config(
pretrained_model_name_or_path=_lowercase , subfolder=_lowercase , return_unused_kwargs=_lowercase , **_lowercase , )
snake_case_ , snake_case_ : Dict = cls.from_config(_lowercase , return_unused_kwargs=_lowercase , **_lowercase )
if hasattr(_lowercase , """create_state""" ) and getattr(_lowercase , """has_state""" , _lowercase ):
snake_case_ : Any = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
self.save_config(save_directory=_lowercase , push_to_hub=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case_ : str = importlib.import_module(__name__.split(""".""" )[0] )
snake_case_ : Optional[int] = [
getattr(_lowercase , _lowercase ) for c in compatible_classes_str if hasattr(_lowercase , _lowercase )
]
return compatible_classes
def __lowerCAmelCase ( __UpperCamelCase : jnp.ndarray , __UpperCamelCase : Tuple[int] ):
'''simple docstring'''
assert len(__UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__UpperCamelCase ) - x.ndim) ) , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Any=0.999 , __UpperCamelCase : Optional[int]=jnp.floataa ):
'''simple docstring'''
def alpha_bar(__UpperCamelCase : Optional[int] ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
snake_case_ : Optional[Any] = []
for i in range(__UpperCamelCase ):
snake_case_ : Dict = i / num_diffusion_timesteps
snake_case_ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__UpperCamelCase ) / alpha_bar(__UpperCamelCase ) , __UpperCamelCase ) )
return jnp.array(__UpperCamelCase , dtype=__UpperCamelCase )
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
@classmethod
def UpperCAmelCase__ ( cls , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Any = scheduler.config
if config.trained_betas is not None:
snake_case_ : Optional[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
snake_case_ : int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case_ : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case_ : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
snake_case_ : Optional[Any] = 1.0 - betas
snake_case_ : Any = jnp.cumprod(_lowercase , axis=0 )
return cls(
alphas=_lowercase , betas=_lowercase , alphas_cumprod=_lowercase , )
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ : Tuple = state.alphas_cumprod
snake_case_ : Optional[int] = alphas_cumprod[timesteps] ** 0.5
snake_case_ : Dict = sqrt_alpha_prod.flatten()
snake_case_ : int = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
snake_case_ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
snake_case_ : Dict = sqrt_one_minus_alpha_prod.flatten()
snake_case_ : Tuple = broadcast_to_shape_from_left(__UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : str = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __lowerCAmelCase ( __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = get_sqrt_alpha_prod(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 58 | 1 |
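The add_noise helper above implements the DDPM forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * epsilon, where alpha_bar_t is the cumulative product of (1 - beta_t). A minimal jnp sketch with an assumed linear beta schedule:

# Minimal sketch of the forward-diffusion step implemented by add_noise above.
import jax.numpy as jnp

betas = jnp.linspace(1e-4, 0.02, 1000)        # assumed linear beta schedule
alphas_cumprod = jnp.cumprod(1.0 - betas)      # alpha-bar_t

def add_noise(x0: jnp.ndarray, noise: jnp.ndarray, t: int) -> jnp.ndarray:
    sqrt_alpha = alphas_cumprod[t] ** 0.5
    sqrt_one_minus_alpha = (1.0 - alphas_cumprod[t]) ** 0.5
    return sqrt_alpha * x0 + sqrt_one_minus_alpha * noise

x0 = jnp.ones((2, 2))
noisy = add_noise(x0, jnp.zeros_like(x0), t=10)  # early t: still close to x0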
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
snake_case_ : Tuple = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__UpperCamelCase )
DownloadCommand.register_subcommand(__UpperCamelCase )
EnvironmentCommand.register_subcommand(__UpperCamelCase )
RunCommand.register_subcommand(__UpperCamelCase )
ServeCommand.register_subcommand(__UpperCamelCase )
UserCommands.register_subcommand(__UpperCamelCase )
AddNewModelCommand.register_subcommand(__UpperCamelCase )
AddNewModelLikeCommand.register_subcommand(__UpperCamelCase )
LfsCommands.register_subcommand(__UpperCamelCase )
PTtoTFCommand.register_subcommand(__UpperCamelCase )
# Let's go
snake_case_ : Union[str, Any] = parser.parse_args()
if not hasattr(__UpperCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
snake_case_ : List[Any] = args.func(__UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
| 58 |
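The entry point above is the stock argparse subcommand pattern: each command registers a subparser and stashes a callable in the func default. A minimal self-contained sketch of the same pattern; the command names are illustrative:

# Sketch of the subcommand registration pattern used by the CLI entry point above.
from argparse import ArgumentParser

parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
commands = parser.add_subparsers(help="demo-cli command helpers")

env = commands.add_parser("env", help="print environment info")
env.set_defaults(func=lambda args: print("environment info goes here"))

args = parser.parse_args(["env"])
if not hasattr(args, "func"):
    parser.print_help()
else:
    args.func(args)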
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 58 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0_0_0_0_0_0 , __UpperCamelCase : int = 1_0 ):
'''simple docstring'''
snake_case_ : defaultdict = defaultdict(__UpperCamelCase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
snake_case_ : Optional[Any] = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
snake_case_ : List[Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(__UpperCamelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 1_0 )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 58 |
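The counting snippet above enumerates hollow square laminae (Project Euler 174): a lamina with outer width o and hole width h of the same parity uses o*o - h*h tiles, and the answer counts tile totals achievable in 1 to 10 ways. A small brute-force cross-check, usable only for small limits:

# Brute-force cross-check for the lamina counting above (small limits only).
from collections import defaultdict

def lamina_type_counts(t_limit: int) -> dict:
    count: defaultdict = defaultdict(int)
    for outer in range(3, t_limit // 4 + 2):
        for hole in range(outer - 2, 0, -2):  # same parity as outer, largest hole first
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break
            count[tiles] += 1
    return count

counts = lamina_type_counts(100)
assert counts[8] == 1  # only the 3x3 lamina with a 1x1 hole uses 8 tiles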
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[int] ):
'''simple docstring'''
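# 1. Validate that path[curr_ind - 1] and next_ver are connected in the graph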
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : list[int] , __UpperCamelCase : int ):
'''simple docstring'''
if curr_ind == len(__UpperCamelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__UpperCamelCase ) ):
if valid_connection(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
# Insert current vertex into path as next transition
snake_case_ : List[str] = next_ver
# Validate created path
if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , curr_ind + 1 ):
return True
# Backtrack
snake_case_ : Tuple = -1
return False
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int = 0 ):
'''simple docstring'''
snake_case_ : Tuple = [-1] * (len(__UpperCamelCase ) + 1)
# initialize start and end of path with starting index
snake_case_ : Optional[int] = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , 1 ) else []
| 58 | 1 |
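A self-contained usage sketch of the backtracking Hamiltonian-cycle search above; the 5-vertex adjacency matrix is an illustrative example, and the function name is an assumption for the obfuscated original:

# Self-contained sketch of the backtracking Hamiltonian-cycle search above.
def hamilton_cycle(graph: list[list[int]], start: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    path[0] = path[-1] = start

    def backtrack(curr_ind: int) -> bool:
        if curr_ind == len(graph):
            # close the cycle back to the starting vertex
            return graph[path[curr_ind - 1]][path[0]] == 1
        for next_ver in range(len(graph)):
            if graph[path[curr_ind - 1]][next_ver] == 1 and next_ver not in path[:curr_ind]:
                path[curr_ind] = next_ver
                if backtrack(curr_ind + 1):
                    return True
                path[curr_ind] = -1  # backtrack
        return False

    return path if backtrack(1) else []

graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # e.g. [0, 1, 2, 4, 3, 0], visiting every vertex once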
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Any = torch.exp(__UpperCamelCase )
snake_case_ : Optional[int] = torch.sum(__UpperCamelCase , dim=1 ) # sum of exp(x_i)
snake_case_ : str = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(__UpperCamelCase ) - B / A
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = config.output_attentions
snake_case_ : str = config.output_hidden_states
snake_case_ : List[str] = nn.ModuleList([BertLayer(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Tuple = nn.ModuleList([BertHighway(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
if (type(_lowercase ) is float) or (type(_lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ : Dict = x
else:
snake_case_ : Union[str, Any] = x
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Any:
'''simple docstring'''
snake_case_ : str = ()
snake_case_ : str = ()
snake_case_ : List[str] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ : int = all_hidden_states + (hidden_states,)
snake_case_ : Any = layer_module(
_lowercase , _lowercase , head_mask[i] , _lowercase , _lowercase )
snake_case_ : Dict = layer_outputs[0]
if self.output_attentions:
snake_case_ : str = all_attentions + (layer_outputs[1],)
snake_case_ : Optional[int] = (hidden_states,)
if self.output_hidden_states:
snake_case_ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : int = current_outputs + (all_attentions,)
snake_case_ : Optional[Any] = self.highway[i](_lowercase )
# logits, pooled_output
if not self.training:
snake_case_ : Tuple = highway_exit[0]
snake_case_ : List[str] = entropy(_lowercase )
snake_case_ : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_lowercase , i + 1 )
else:
snake_case_ : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ : Dict = all_hidden_states + (hidden_states,)
snake_case_ : str = (hidden_states,)
if self.output_hidden_states:
snake_case_ : List[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Union[str, Any] = outputs + (all_attentions,)
snake_case_ : List[str] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config
snake_case_ : int = BertEmbeddings(_lowercase )
snake_case_ : Tuple = DeeBertEncoder(_lowercase )
snake_case_ : int = BertPooler(_lowercase )
self.init_weights()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = value
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_lowercase )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
snake_case_ : Dict = input_ids.size()
elif inputs_embeds is not None:
snake_case_ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
snake_case_ : int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ : Dict = torch.ones(_lowercase , device=_lowercase )
if encoder_attention_mask is None:
snake_case_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
snake_case_ : Any = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ : Any = encoder_attention_mask[:, None, None, :]
snake_case_ : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ : List[str] = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ : int = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
snake_case_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
snake_case_ : List[str] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
snake_case_ : Optional[Any] = encoder_outputs[0]
snake_case_ : Union[str, Any] = self.pooler(_lowercase )
snake_case_ : Optional[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = message
snake_case_ : str = exit_layer # start from 1!
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : str = BertPooler(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Dict = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = encoder_outputs[0]
snake_case_ : List[Any] = self.pooler(_lowercase )
# "return" pooler_output
# BertModel
snake_case_ : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ : Union[str, Any] = bmodel_output[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : List[str] = self.classifier(_lowercase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : Tuple = config.num_hidden_layers
snake_case_ : Any = DeeBertModel(_lowercase )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> int:
'''simple docstring'''
snake_case_ : int = self.num_layers
try:
snake_case_ : Any = self.bert(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ : str = outputs[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Optional[int] = e.message
snake_case_ : Dict = e.exit_layer
snake_case_ : Optional[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(_lowercase )
snake_case_ : int = []
snake_case_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : List[Any] = MSELoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : str = (loss,) + outputs
if not self.training:
snake_case_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 58 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''BlipImageProcessor'''
_lowerCamelCase = '''AutoTokenizer'''
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__(_lowercase , _lowercase )
# add QFormer tokenizer
snake_case_ : List[str] = qformer_tokenizer
def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchFeature:
    '''simple docstring'''
    if images is None and text is None:
        raise ValueError("You have to specify at least images or text.")
    encoding = BatchFeature()
    if text is not None:
        text_encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        encoding.update(text_encoding)
        qformer_text_encoding = self.qformer_tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
        encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
    if images is not None:
        image_encoding = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(image_encoding)
    return encoding
def batch_decode(self, *args, **kwargs) -> List[Any]:
    '''simple docstring'''
    return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs) -> Dict:
    '''simple docstring'''
    return self.tokenizer.decode(*args, **kwargs)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def model_input_names(self) -> List[Any]:
    '''simple docstring'''
    tokenizer_input_names = self.tokenizer.model_input_names
    image_processor_input_names = self.image_processor.model_input_names
    return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def save_pretrained(self, save_directory, **kwargs) -> Optional[int]:
    '''simple docstring'''
    if os.path.isfile(save_directory):
        raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
    os.makedirs(save_directory, exist_ok=True)
    qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
    self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
    return super().save_pretrained(save_directory, **kwargs)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> int:
    '''simple docstring'''
    qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
    args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
    args.append(qformer_tokenizer)
    return cls(*args)
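# Hypothetical usage of the processor above; the checkpoint name is an
# assumption and downloading it requires network access.
from PIL import Image

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
image = Image.new("RGB", (224, 224))
inputs = processor(images=image, text="What is in the picture?", return_tensors="pt")
# The batch carries both tokenizations alongside the pixel values:
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']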
| 58 | 1 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    '''simple docstring'''
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
args = parser.parse_args()
zeller(args.date_input)
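# A few spot checks for zeller(); the function already cross-validates its
# arithmetic against datetime.date, so a wrong expectation raises rather
# than silently passing.
assert "Sunday" in zeller("01-31-2010")
assert "Wednesday" in zeller("02-01-2012")
print(zeller("11-25-2024"))  # Your date 11-25-2024, is a Monday!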
| 58 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_vivit"] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
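# A minimal illustration of the same lazy-import idea using PEP 562 module
# __getattr__ (a sketch, not transformers' actual _LazyModule): attributes
# listed in the structure are resolved only on first access.
import importlib

_lazy_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _lazy_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(name)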
| 58 | 1 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
"""simple docstring"""
def __init__( self , _lowercase = "cpu" , _lowercase = "openai/clip-vit-large-patch14" ) -> None:
'''simple docstring'''
snake_case_ : Optional[Any] = device
snake_case_ : List[Any] = CLIPTokenizerFast.from_pretrained(_lowercase )
snake_case_ : Optional[int] = [0.4814_5466, 0.457_8275, 0.4082_1073]
snake_case_ : Union[str, Any] = [0.2686_2954, 0.2613_0258, 0.2757_7711]
snake_case_ : str = torchvision.transforms.Normalize(self.image_mean , self.image_std )
snake_case_ : Any = torchvision.transforms.Resize(2_2_4 )
snake_case_ : str = torchvision.transforms.CenterCrop(2_2_4 )
def preprocess_img(self, images) -> Dict:
    '''simple docstring'''
    images = self.resize(images)
    images = self.center_crop(images)
    images = self.normalize(images)
    return images
def __call__(self, text=None, images=None, **kwargs) -> Tuple:
    '''simple docstring'''
    encoding = self.tokenizer(text=text, **kwargs)
    encoding["pixel_values"] = self.preprocess_img(images)
    encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
    return encoding
class VQGAN_CLIP( nn.Module ):
"""simple docstring"""
def __init__(self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False) -> None:
    '''simple docstring'''
    super().__init__()
    self.latent = None
    self.device = device if device else get_device()
    if vqgan:
        self.vqgan = vqgan
    else:
        self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
    self.vqgan.eval()
    if clip:
        self.clip = clip
    else:
        self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    self.clip.to(self.device)
    self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
    self.iterations = iterations
    self.lr = lr
    self.log = log
    self.make_grid = make_grid
    self.return_val = return_val
    self.quantize = quantize
    self.latent_dim = self.vqgan.decoder.z_shape
def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True) -> int:
    '''simple docstring'''
    images = []
    if output_path is None:
        output_path = "./animation.gif"
    if input_path is None:
        input_path = self.save_path
    paths = sorted(glob(input_path + "/*"))
    if not len(paths):
        raise ValueError(
            "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
            " function?)")
    if len(paths) == 1:
        print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
    frame_duration = total_duration / len(paths)
    durations = [frame_duration] * len(paths)
    if extend_frames:
        durations[0] = 1.5
        durations[-1] = 3
    for file_name in paths:
        if file_name.endswith(".png"):
            images.append(imageio.imread(file_name))
    imageio.mimsave(output_path, images, duration=durations)
    print(f"gif saved to {output_path}")
def _get_latent(self, path=None, img=None) -> Tuple:
    '''simple docstring'''
    if not (path or img):
        raise ValueError("Input either path or tensor")
    if img is not None:
        raise NotImplementedError
    img = preprocess(Image.open(path), target_image_size=256).to(self.device)
    img = preprocess_vqgan(img)
    z, *_ = self.vqgan.encode(img)
    return z
def _add_vector(self, transform_vector) -> List[str]:
    '''simple docstring'''
    base_latent = self.latent.detach().requires_grad_()
    trans_latent = base_latent + transform_vector
    if self.quantize:
        z_q, *_ = self.vqgan.quantize(trans_latent)
    else:
        z_q = trans_latent
    return self.vqgan.decode(z_q)
def _get_clip_similarity(self, prompts, image, weights=None) -> int:
    '''simple docstring'''
    clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
    clip_outputs = self.clip(**clip_inputs)
    similarity_logits = clip_outputs.logits_per_image
    if weights is not None:
        similarity_logits = similarity_logits * weights
    return similarity_logits.sum()
def _get_CLIP_loss(self, pos_prompts, neg_prompts, image) -> Optional[Any]:
    '''simple docstring'''
    pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
    if neg_prompts:
        neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
    else:
        neg_logits = torch.tensor([1], device=self.device)
    loss = -torch.log(pos_logits) + torch.log(neg_logits)
    return loss
def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts) -> List[str]:
    '''simple docstring'''
    vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
    optim = torch.optim.Adam([vector], lr=self.lr)
    for i in range(self.iterations):
        optim.zero_grad()
        transformed_img = self._add_vector(vector)
        processed_img = loop_post_process(transformed_img)
        clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
        print("CLIP loss", clip_loss)
        if self.log:
            wandb.log({"CLIP Loss": clip_loss})
        clip_loss.backward(retain_graph=True)
        optim.step()
        if self.return_val == "image":
            yield custom_to_pil(transformed_img[0])
        else:
            yield vector
def _init_logging(self, positive_prompts, negative_prompts, image_path) -> Optional[Any]:
    '''simple docstring'''
    wandb.init(reinit=True, project="face-editor")
    wandb.config.update({"Positive Prompts": positive_prompts})
    wandb.config.update({"Negative Prompts": negative_prompts})
    wandb.config.update({"lr": self.lr, "iterations": self.iterations})
    if image_path:
        image = Image.open(image_path)
        image = image.resize((256, 256))
        wandb.log("Original Image", wandb.Image(image))
def process_prompts(self, prompts) -> Any:
    '''simple docstring'''
    if not prompts:
        return []
    processed_prompts = []
    weights = []
    if isinstance(prompts, str):
        prompts = [prompt.strip() for prompt in prompts.split("|")]
    for prompt in prompts:
        if isinstance(prompt, (tuple, list)):
            processed_prompt = prompt[0]
            weight = float(prompt[1])
        elif ":" in prompt:
            processed_prompt, weight = prompt.split(":")
            weight = float(weight)
        else:
            processed_prompt = prompt
            weight = 1.0
        processed_prompts.append(processed_prompt)
        weights.append(weight)
    return {
        "prompts": processed_prompts,
        "weights": torch.tensor(weights, device=self.device),
    }
def generate(self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None) -> Union[str, Any]:
    '''simple docstring'''
    if image_path:
        self.latent = self._get_latent(image_path)
    else:
        self.latent = torch.randn(self.latent_dim, device=self.device)
    if self.log:
        self._init_logging(pos_prompts, neg_prompts, image_path)
    assert pos_prompts, "You must provide at least one positive prompt."
    pos_prompts = self.process_prompts(pos_prompts)
    neg_prompts = self.process_prompts(neg_prompts)
    if save_final and save_path is None:
        save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        else:
            save_path = save_path + "_" + get_timestamp()
            os.makedirs(save_path)
        self.save_path = save_path
    original_img = self.vqgan.decode(self.latent)[0]
    if show_intermediate:
        print("Original Image")
        show_pil(custom_to_pil(original_img))
    original_img = loop_post_process(original_img)
    for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
        if show_intermediate:
            show_pil(transformed_img)
        if save_intermediate:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
        if self.log:
            wandb.log({"Image": wandb.Image(transformed_img)})
    if show_final:
        show_pil(transformed_img)
    if save_final:
        transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
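# Hypothetical driver for the editor above. It assumes the repo-local
# helpers (loaders, img_processing, utils) are importable; prompt strings
# follow the "text:weight" convention parsed by process_prompts().
editor = VQGAN_CLIP(iterations=20, lr=0.05, log=False)
for _ in editor.generate(
    pos_prompts="a smiling face:1.0|blue eyes:0.5",
    neg_prompts="blurry:0.8",
    show_intermediate=False,
    save_intermediate=True,
):
    pass  # each optimization step yields the current decoded image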
| 58 |
"""simple docstring"""
import argparse
import torch
from transformers import (
Wav2Vec2Config,
Wav2Vec2FeatureExtractor,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    '''simple docstring'''
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    '''simple docstring'''
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    '''simple docstring'''
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
args = parser.parse_args()
convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
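# The converters above port weights by assigning to `.data`, which swaps a
# parameter's contents in place without touching the autograd graph. In
# isolation:
import torch
from torch import nn

linear = nn.Linear(4, 2)
new_weight = torch.zeros(2, 4)
linear.weight.data = new_weight  # shape must match the module's layout
assert torch.equal(linear.weight, new_weight)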
| 58 | 1 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    '''simple docstring'''
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
return values.split(""",""" )
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
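# The same status query without shelling out to curl — a sketch using only
# the standard library; endpoint and headers mirror the script above.
import json
from urllib.request import Request, urlopen

def fetch_runners(token: str) -> list:
    req = Request(
        "https://api.github.com/repos/huggingface/transformers/actions/runners",
        headers={"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"},
    )
    with urlopen(req) as resp:
        return json.loads(resp.read().decode("utf-8"))["runners"]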
| 58 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = ConvBertTokenizer
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs) -> Union[str, Any]:
    '''simple docstring'''
    super().__init__(
        vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
    normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
    if (
        normalizer_state.get("lowercase", do_lower_case) != do_lower_case
        or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
    ):
        normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
        normalizer_state["lowercase"] = do_lower_case
        normalizer_state["strip_accents"] = strip_accents
        normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
        self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
    self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> int:
    '''simple docstring'''
    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    if token_ids_1:
        output += token_ids_1 + [self.sep_token_id]
    return output
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
    '''simple docstring'''
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
    '''simple docstring'''
    files = self._tokenizer.model.save(save_directory, name=filename_prefix)
    return tuple(files)
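# How the two helpers above lay out a sentence pair (toy ids for
# illustration only):
cls_id, sep_id = 101, 102
token_ids_0 = [7, 8]  # sentence A
token_ids_1 = [9]     # sentence B
input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)
print(input_ids)       # [101, 7, 8, 102, 9, 102]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1]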
| 58 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) -> Tuple:
    '''simple docstring'''
    size = size if size is not None else {"shortest_edge": 18}
    crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_normalize = do_normalize
    self.image_mean = image_mean
    self.image_std = image_std
def prepare_image_processor_dict( self ) -> str:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
image_processing_class = LevitImageProcessor if is_vision_available() else None
def setUp(self) -> Dict:
    '''simple docstring'''
    self.image_processor_tester = LevitImageProcessingTester(self)
@property
def image_processor_dict( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self) -> int:
    '''simple docstring'''
    image_processing = self.image_processing_class(**self.image_processor_dict)
    self.assertTrue(hasattr(image_processing, "image_mean"))
    self.assertTrue(hasattr(image_processing, "image_std"))
    self.assertTrue(hasattr(image_processing, "do_normalize"))
    self.assertTrue(hasattr(image_processing, "do_resize"))
    self.assertTrue(hasattr(image_processing, "do_center_crop"))
    self.assertTrue(hasattr(image_processing, "size"))
def test_image_processor_from_dict_with_kwargs(self) -> str:
    '''simple docstring'''
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
    self.assertEqual(image_processor.size, {"shortest_edge": 18})
    self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
    self.assertEqual(image_processor.size, {"shortest_edge": 42})
    self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
def test_batch_feature(self) -> List[Any]:
    '''simple docstring'''
    pass
def test_call_pil(self) -> Optional[Any]:
    '''simple docstring'''
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # create random PIL images
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
    for image in image_inputs:
        self.assertIsInstance(image, Image.Image)
    # Test not batched input
    encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    # Test batched
    encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
def test_call_numpy(self) -> Union[str, Any]:
    '''simple docstring'''
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # create random numpy tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
    for image in image_inputs:
        self.assertIsInstance(image, np.ndarray)
    # Test not batched input
    encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    # Test batched
    encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
def test_call_pytorch(self) -> List[str]:
    '''simple docstring'''
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # create random PyTorch tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
    for image in image_inputs:
        self.assertIsInstance(image, torch.Tensor)
    # Test not batched input
    encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    # Test batched
    encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
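# Direct use of the processor under test, mirroring what the three test
# variants assert (a sketch; requires torch and vision extras installed):
import numpy as np
from transformers import LevitImageProcessor

image_processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
images = [np.random.randint(0, 256, (20, 30, 3), dtype=np.uint8) for _ in range(2)]
batch = image_processor(images, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([2, 3, 18, 18])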
| 58 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
def __init__(self, input_dims=128, targets_length=256, max_decoder_noise_time=2000.0, d_model=768, num_layers=12, num_heads=12, d_kv=64, d_ff=2048, dropout_rate=0.1) -> Dict:
    '''simple docstring'''
    super().__init__()
    self.conditioning_emb = nn.Sequential(
        nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU())
    self.position_encoding = nn.Embedding(targets_length, d_model)
    self.position_encoding.weight.requires_grad = False
    self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
    self.dropout = nn.Dropout(p=dropout_rate)
    self.decoders = nn.ModuleList()
    for lyr_num in range(num_layers):
        # FiLM conditional T5 decoder
        lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
        self.decoders.append(lyr)
    self.decoder_norm = TaLayerNorm(d_model)
    self.post_dropout = nn.Dropout(p=dropout_rate)
    self.spec_out = nn.Linear(d_model, input_dims, bias=False)
def encoder_decoder_mask(self, query_input, key_input) -> Optional[Any]:
    '''simple docstring'''
    mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
    return mask.unsqueeze(-3)
def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time) -> List[Any]:
    '''simple docstring'''
    batch, _, _ = decoder_input_tokens.shape
    assert decoder_noise_time.shape == (batch,)
    # decoder_noise_time is in [0, 1), so rescale to expected timing range.
    time_steps = get_timestep_embedding(
        decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time).to(dtype=self.dtype)
    conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
    assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
    seq_length = decoder_input_tokens.shape[1]
    # If we want to use relative positions for audio context, we can just offset
    # this sequence by the length of encodings_and_masks.
    decoder_positions = torch.broadcast_to(
        torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length))
    position_encodings = self.position_encoding(decoder_positions)
    inputs = self.continuous_inputs_projection(decoder_input_tokens)
    inputs += position_encodings
    y = self.dropout(inputs)
    # decoder: No padding present.
    decoder_mask = torch.ones(
        decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
    # Translate encoding masks to encoder-decoder masks.
    encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
    # cross attend style: concat encodings
    encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
    encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
    for lyr in self.decoders:
        y = lyr(
            y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask)[0]
    y = self.decoder_norm(y)
    y = self.post_dropout(y)
    spec_out = self.spec_out(y)
    return spec_out
class DecoderLayer( nn.Module ):
"""simple docstring"""
def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6) -> Union[str, Any]:
    '''simple docstring'''
    super().__init__()
    self.layer = nn.ModuleList()
    # cond self attention: layer 0
    self.layer.append(
        TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
    # cross attention: layer 1
    self.layer.append(
        TaLayerCrossAttention(
            d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))
    # Film Cond MLP + dropout: last layer
    self.layer.append(
        TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))
def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None) -> List[Any]:
    '''simple docstring'''
    hidden_states = self.layer[0](
        hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
    if encoder_hidden_states is not None:
        encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
            encoder_hidden_states.dtype)
        hidden_states = self.layer[1](
            hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask)
    # Apply Film Conditional Feed Forward layer
    hidden_states = self.layer[-1](hidden_states, conditioning_emb)
    return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
"""simple docstring"""
def __init__(self, d_model, d_kv, num_heads, dropout_rate) -> str:
    '''simple docstring'''
    super().__init__()
    self.layer_norm = TaLayerNorm(d_model)
    self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
    self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
    self.dropout = nn.Dropout(dropout_rate)
def forward(self, hidden_states, conditioning_emb=None, attention_mask=None) -> Optional[Any]:
    '''simple docstring'''
    normed_hidden_states = self.layer_norm(hidden_states)
    if conditioning_emb is not None:
        normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
    # Self-attention block
    attention_output = self.attention(normed_hidden_states)
    hidden_states = hidden_states + self.dropout(attention_output)
    return hidden_states
class TaLayerCrossAttention( nn.Module ):
"""simple docstring"""
def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon) -> List[Any]:
    '''simple docstring'''
    super().__init__()
    self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
    self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
    self.dropout = nn.Dropout(dropout_rate)
def forward(self, hidden_states, key_value_states=None, attention_mask=None) -> Optional[int]:
    '''simple docstring'''
    normed_hidden_states = self.layer_norm(hidden_states)
    attention_output = self.attention(
        normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1))
    layer_output = hidden_states + self.dropout(attention_output)
    return layer_output
class TaLayerFFCond( nn.Module ):
"""simple docstring"""
def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon) -> Dict:
    '''simple docstring'''
    super().__init__()
    self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
    self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
    self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
    self.dropout = nn.Dropout(dropout_rate)
def forward(self, hidden_states, conditioning_emb=None) -> str:
    '''simple docstring'''
    forwarded_states = self.layer_norm(hidden_states)
    if conditioning_emb is not None:
        forwarded_states = self.film(forwarded_states, conditioning_emb)
    forwarded_states = self.DenseReluDense(forwarded_states)
    hidden_states = hidden_states + self.dropout(forwarded_states)
    return hidden_states
class TaDenseGatedActDense( nn.Module ):
"""simple docstring"""
def __init__(self, d_model, d_ff, dropout_rate) -> Optional[int]:
    '''simple docstring'''
    super().__init__()
    self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
    self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
    self.wo = nn.Linear(d_ff, d_model, bias=False)
    self.dropout = nn.Dropout(dropout_rate)
    self.act = NewGELUActivation()
def forward(self, hidden_states) -> int:
    '''simple docstring'''
    hidden_gelu = self.act(self.wi_0(hidden_states))
    hidden_linear = self.wi_1(hidden_states)
    hidden_states = hidden_gelu * hidden_linear
    hidden_states = self.dropout(hidden_states)
    hidden_states = self.wo(hidden_states)
    return hidden_states
class TaLayerNorm( nn.Module ):
"""simple docstring"""
def __init__(self, hidden_size, eps=1e-6) -> str:
    '''simple docstring'''
    super().__init__()
    self.weight = nn.Parameter(torch.ones(hidden_size))
    self.variance_epsilon = eps
def forward(self, hidden_states) -> List[Any]:
    '''simple docstring'''
    # RMS-style norm: scale only, no shift.
    variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
    # convert into half-precision if necessary
    if self.weight.dtype in [torch.float16, torch.bfloat16]:
        hidden_states = hidden_states.to(self.weight.dtype)
    return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
"""simple docstring"""
def forward(self, input) -> torch.Tensor:
    '''simple docstring'''
    return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer( nn.Module ):
"""simple docstring"""
def __init__(self, in_features, out_features) -> Any:
    '''simple docstring'''
    super().__init__()
    self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
def forward(self, x, conditioning_emb) -> List[Any]:
    '''simple docstring'''
    emb = self.scale_bias(conditioning_emb)
    scale, shift = torch.chunk(emb, 2, -1)
    x = x * (1 + scale) + shift
    return x
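# FiLM in isolation: a conditioning embedding predicts per-channel
# (scale, shift) pairs that modulate activations — exactly the
# x * (1 + scale) + shift pattern in TaFiLMLayer above.
import torch

d_model = 8
x = torch.randn(2, 5, d_model)        # (batch, seq, channels)
emb = torch.randn(2, 1, d_model * 2)  # produced by a Linear layer in practice
scale, shift = torch.chunk(emb, 2, dim=-1)
out = x * (1 + scale) + shift
print(out.shape)  # torch.Size([2, 5, 8])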
| 58 | 1 |
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    '''simple docstring'''
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    '''simple docstring'''
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
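# Round trip: the minimal form generated above always parses back to the
# same value, which is what Project Euler 89's "savings" count relies on.
for value in (14, 49, 1990, 2024):
    assert parse_roman_numerals(generate_roman_numerals(value)) == value
print(generate_roman_numerals(49))               # XLIX
print(parse_roman_numerals("IIIIIIIIIIIIIIII"))  # 16 — valid but wasteful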
| 58 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig( PretrainedConfig ):
"""simple docstring"""
model_type = "roformer"
def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs) -> int:
    '''simple docstring'''
    super().__init__(pad_token_id=pad_token_id, **kwargs)
    self.vocab_size = vocab_size
    self.embedding_size = hidden_size if embedding_size is None else embedding_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.hidden_act = hidden_act
    self.intermediate_size = intermediate_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.rotary_value = rotary_value
    self.use_cache = use_cache
class RoFormerOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
snake_case_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
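# Quick sanity check of the config defaults (a sketch; constructing the
# config needs no model weights):
config = RoFormerConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
print(config.embedding_size)  # 128 — falls back to hidden_size when unset
print(config.model_type)      # roformer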
| 58 | 1 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
"""simple docstring"""
csv_file: str = field(
    metadata={"help": "The csv file to plot."},
)
plot_along_batch: bool = field(
    default=False, metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
)
is_time: bool = field(
    default=False, metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
)
no_log_scale: bool = field(
    default=False, metadata={"help": "Disable logarithmic scale when plotting"},
)
is_train: bool = field(
    default=False, metadata={
        "help": "Whether the csv file has training results or inference results. Defaults to inference results."
    },
)
figure_png_file: Optional[str] = field(
    default=None, metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
)
short_model_names: Optional[List[str]] = list_field(
    default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
)
def can_convert_to_int(string):
    '''simple docstring'''
    try:
        int(string)
        return True
    except ValueError:
        return False
def can_convert_to_float(string):
    '''simple docstring'''
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
"""simple docstring"""
def __init__(self, args) -> Tuple:
    '''simple docstring'''
    self.args = args
    self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
    with open(self.args.csv_file, newline="") as csv_file:
        reader = csv.DictReader(csv_file)
        for row in reader:
            model_name = row["model"]
            self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
            self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
            if can_convert_to_int(row["result"]):
                # value is not None
                self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = int(row["result"])
            elif can_convert_to_float(row["result"]):
                # value is not None
                self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = float(row["result"])
def plot(self) -> int:
    '''simple docstring'''
    fig, ax = plt.subplots()
    title_str = "Time usage" if self.args.is_time else "Memory usage"
    title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
    if not self.args.no_log_scale:
        # set logarithm scales
        ax.set_xscale("log")
        ax.set_yscale("log")
    for axis in [ax.xaxis, ax.yaxis]:
        axis.set_major_formatter(ScalarFormatter())
    for model_name_idx, model_name in enumerate(self.result_dict.keys()):
        batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
        sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
        results = self.result_dict[model_name]["result"]
        (x_axis_array, inner_loop_array) = (
            (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
        )
        label_model_name = (
            model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
        )
        for inner_loop_value in inner_loop_array:
            if self.args.plot_along_batch:
                y_axis_array = np.asarray(
                    [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int)
            else:
                y_axis_array = np.asarray(
                    [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32)
            (x_axis_label, inner_loop_label) = (
                ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
            )
            x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
            plt.scatter(
                x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
            plt.plot(x_axis_array, y_axis_array, "--")
        title_str += f" {label_model_name} vs."
    title_str = title_str[:-4]
    y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
    # plot
    plt.title(title_str)
    plt.xlabel(x_axis_label)
    plt.ylabel(y_axis_label)
    plt.legend()
    if self.args.figure_png_file is not None:
        plt.savefig(self.args.figure_png_file)
    else:
        plt.show()
def main():
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
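# The reader above expects exactly these column names; a minimal CSV plus
# a hypothetical invocation:
csv_text = """model,batch_size,sequence_length,result
bert-base,8,128,0.012
bert-base,8,512,0.043
"""
with open("benchmark.csv", "w") as f:
    f.write(csv_text)
# python plot_csv_file.py --csv_file benchmark.csv --is_time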
| 58 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
Pix2StructConfig,
Pix2StructForConditionalGeneration,
Pix2StructImageProcessor,
Pix2StructProcessor,
Pix2StructTextConfig,
Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    '''simple docstring'''
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    '''simple docstring'''
    converted_dict = {}
    CONVERSION_MAPPING = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
    DECODER_CONVERSION_MAPPING = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    '''simple docstring'''
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)
    model = Pix2StructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Convert a VQA (visual question answering) checkpoint.''')
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
    args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
)
| 58 | 1 |
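A quick sketch of the renaming trick used above: dictionary lookups handle the fixed substrings, while a single re.sub rewrites the numbered layer prefix. The key strings below are hypothetical stand-ins, not real checkpoint keys.

import re

flax_keys = ["encoder.layers_0.mlp.wi_0", "decoder.layers_11.self_attention.o"]
renamed = []
for key in flax_keys:
    new_key = re.sub(r"layers_(\d+)", r"layer.\1", key)  # layers_11 -> layer.11
    if "decoder" not in new_key:
        # encoder keys additionally gain the nested "encoder.encoder" prefix
        new_key = new_key.replace("encoder", "encoder.encoder")
    renamed.append(new_key)
print(renamed)
# ['encoder.encoder.layer.0.mlp.wi_0', 'decoder.layer.11.self_attention.o']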
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=9_9 , _lowercase=1_3 , _lowercase=1_6 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=2 , _lowercase=3_2 , _lowercase=4 , _lowercase=4 , _lowercase=3_0 , _lowercase=0 , _lowercase=1 , _lowercase=2 , _lowercase=None , ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = parent
snake_case_ : List[Any] = batch_size
snake_case_ : Union[str, Any] = decoder_seq_length
# For common tests
snake_case_ : Optional[Any] = self.decoder_seq_length
snake_case_ : Union[str, Any] = is_training
snake_case_ : Optional[Any] = use_attention_mask
snake_case_ : Any = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[int] = d_model
snake_case_ : Dict = d_model
snake_case_ : Optional[int] = decoder_layers
snake_case_ : List[Any] = decoder_layers
snake_case_ : Optional[Any] = decoder_ffn_dim
snake_case_ : Optional[int] = decoder_attention_heads
snake_case_ : Tuple = decoder_attention_heads
snake_case_ : Tuple = eos_token_id
snake_case_ : Optional[Any] = bos_token_id
snake_case_ : str = pad_token_id
snake_case_ : List[str] = decoder_start_token_id
snake_case_ : List[Any] = use_cache
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : str = None
snake_case_ : int = decoder_seq_length
snake_case_ : int = 2
snake_case_ : Dict = 1
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case_ : List[str] = None
if self.use_attention_mask:
snake_case_ : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case_ : int = None
if self.use_labels:
snake_case_ : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case_ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = True
snake_case_ : Tuple = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
snake_case_ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case_ : Any = model(_lowercase , use_cache=_lowercase )
snake_case_ : Union[str, Any] = model(_lowercase )
snake_case_ : str = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
snake_case_ : str = outputs["""past_key_values"""]
# create hypothetical next token and extend to next_input_ids
snake_case_ : List[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the next tokens to input_ids
snake_case_ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : Union[str, Any] = model(_lowercase )["""last_hidden_state"""]
snake_case_ : Optional[Any] = model(_lowercase , past_key_values=_lowercase )["""last_hidden_state"""]
# select random slice
snake_case_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case_ : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = config_and_inputs
snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
_lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
_lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
_lowerCamelCase = True
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
snake_case_ : int = ConfigTester(self , config_class=_lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
| 58 |
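The past-key-values check above is the standard cache-equivalence test: one full forward pass over n + 1 tokens must agree with a cached pass that feeds only the final token plus the cache. A minimal sketch of the same contract with a tiny randomly initialized decoder (the sizes are arbitrary, chosen only to keep it fast):

import torch
from transformers import TrOCRConfig
from transformers.models.trocr.modeling_trocr import TrOCRDecoder

config = TrOCRConfig(vocab_size=99, d_model=32, decoder_layers=2,
                     decoder_ffn_dim=37, decoder_attention_heads=4, use_cache=True)
model = TrOCRDecoder(config).eval()
input_ids = torch.randint(1, config.vocab_size, (1, 5))
next_token = torch.randint(1, config.vocab_size, (1, 1))
with torch.no_grad():
    # pass 1: all six tokens at once
    full = model(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state
    # pass 2: prefix first, then only the new token plus the cache
    prefix = model(input_ids, use_cache=True)
    cached = model(next_token, past_key_values=prefix.past_key_values).last_hidden_state
print(torch.allclose(full[:, -1], cached[:, 0], atol=1e-3))  # True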
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__UpperCamelCase ) * abs(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 58 | 1 |
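The snippet above implements KE = 1/2 * m * v**2 (the abs() calls make the sign of the velocity irrelevant, since v is squared). A readable restatement with a worked check; the parameter names mass and velocity are inferred from the function body:

def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)

print(kinetic_energy(10, 10))   # 0.5 * 10 * 10**2 = 500.0
print(kinetic_energy(2, -10))   # direction does not matter: 100.0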
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[int] ):
'''simple docstring'''
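# 1. Validate that path exists between current and next vertices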
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : list[int] , __UpperCamelCase : int ):
'''simple docstring'''
if curr_ind == len(__UpperCamelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__UpperCamelCase ) ):
if valid_connection(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
# Insert current vertex into path as next transition
snake_case_ : List[str] = next_ver
# Validate created path
if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , curr_ind + 1 ):
return True
# Backtrack
snake_case_ : Tuple = -1
return False
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int = 0 ):
'''simple docstring'''
snake_case_ : Tuple = [-1] * (len(__UpperCamelCase ) + 1)
# initialize start and end of path with starting index
snake_case_ : Optional[int] = start_index
# evaluate: if we find an answer, return the path; otherwise return an empty array
return path if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , 1 ) else []
| 58 |
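By their internal call names, the three functions above are valid_connection, util_hamilton_cycle, and hamilton_cycle. Assuming those names, a small usage sketch on a 5-vertex graph that contains the Hamiltonian cycle 0-1-2-4-3-0:

graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))     # [0, 1, 2, 4, 3, 0]
print(hamilton_cycle(graph, 3))  # start (and end) at vertex 3: [3, 0, 1, 2, 4, 3]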
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionInpaintPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase = frozenset([] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
snake_case_ : Dict = PNDMScheduler(skip_prk_steps=_lowercase )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case_ : Dict = CLIPTextModel(_lowercase )
snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowercase ) ).to(_lowercase )
snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Any = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : str = torch.manual_seed(_lowercase )
else:
snake_case_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline(**_lowercase )
snake_case_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = self.get_dummy_inputs(_lowercase )
snake_case_ : List[str] = sd_pipe(**_lowercase ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Optional[int] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Dict = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , torch_dtype=torch.floataa , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : List[str] = PNDMScheduler.from_pretrained(_lowercase , subfolder="""scheduler""" )
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , scheduler=_lowercase , torch_dtype=torch.floataa , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : Any = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 58 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Optional[Any] = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 |
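The _import_structure pattern above keeps `import` cheap: heavy submodules are only loaded when one of their names is first accessed. A minimal illustration of the same idea using PEP 562 module-level __getattr__ (a sketch of the pattern, not transformers' actual _LazyModule implementation; lazy_pkg and HeavyClass are hypothetical):

# lazy_pkg/__init__.py
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):  # called only for names not already defined in this module
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")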
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
snake_case_ : Optional[int] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case_ : Optional[Any] = F'{src_lang}-{tgt_lang}'
snake_case_ : Dict = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case_ : List[str] = os.path.join(__UpperCamelCase , """README.md""" )
print(F'Generating {path}' )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__lowerCAmelCase : str = Path(__file__).resolve().parent.parent.parent
__lowerCAmelCase : Optional[int] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = model_name.split('''-''')
__lowerCAmelCase : Optional[int] = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
snake_case_ : Optional[int] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case_ : Optional[Any] = F'{src_lang}-{tgt_lang}'
snake_case_ : Dict = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case_ : List[str] = os.path.join(__UpperCamelCase , """README.md""" )
print(F'Generating {path}' )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__lowerCAmelCase : str = Path(__file__).resolve().parent.parent.parent
__lowerCAmelCase : Optional[int] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = model_name.split('''-''')
__lowerCAmelCase : Optional[int] = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58 |
"""simple docstring"""
__lowerCAmelCase : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCAmelCase : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCAmelCase : Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58 | 1 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = None
_lowerCamelCase = None
def __lowerCAmelCase ( __UpperCamelCase : TreeNode | None ):
'''simple docstring'''
def is_valid_tree(__UpperCamelCase : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__UpperCamelCase ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
__UpperCamelCase : TreeNode | None , __UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , __UpperCamelCase , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , __UpperCamelCase )
)
return is_binary_search_tree_recursive_check(__UpperCamelCase , -float("""inf""" ) , float("""inf""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 |
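Assuming the dataclass above is TreeNode with fields data, left, right (the names the traversal reads) and the outer function is is_binary_search_tree, a usage sketch:

valid = TreeNode(6.0, TreeNode(2.5, TreeNode(1.0), TreeNode(4.0)), TreeNode(8.0))
print(is_binary_search_tree(valid))  # True: every node lies inside its (left_bound, right_bound)

invalid = TreeNode(6.0, TreeNode(8.0), TreeNode(9.0))  # left child larger than its parent
print(is_binary_search_tree(invalid))  # False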
"""simple docstring"""
from jiwer import compute_measures
import datasets
__lowerCAmelCase : Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCAmelCase : Union[str, Any] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence
can have a different length from the reference word sequence (supposedly the correct one). The WER
is derived from the Levenshtein distance, working at the word level instead of the phoneme level.
The WER is a valuable tool for comparing different systems as well as for evaluating improvements
within one system. This kind of measurement, however, provides no details on the nature of
translation errors and further work is therefore required to identify the main source(s) of error
and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken)
word sequence using dynamic string alignment. Examination of this issue is seen through a theory
called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__lowerCAmelCase : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=False ) -> Optional[Any]:
'''simple docstring'''
if concatenate_texts:
return compute_measures(_lowercase , _lowercase )["wer"]
else:
snake_case_ : List[str] = 0
snake_case_ : Optional[int] = 0
for prediction, reference in zip(_lowercase , _lowercase ):
snake_case_ : Optional[Any] = compute_measures(_lowercase , _lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 58 | 1 |
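Since compute_measures returns word-level substitution (S), deletion (D) and insertion (I) counts, the docstring example can be checked by hand: pair one needs 1 substitution over 4 reference words, pair two needs 2 substitutions plus 1 insertion over 4 reference words, so corpus WER = (1 + 3) / (4 + 4) = 0.5. A small word-level edit-distance sketch that reproduces that number (plain dynamic programming, not jiwer's implementation):

def word_errors(reference: str, prediction: str) -> tuple[int, int]:
    """Return (S + D + I, reference length N) at the word level."""
    ref, hyp = reference.split(), prediction.split()
    # dp[i][j] = edits to turn the first i reference words into the first j predicted words
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i  # deletions
    for j in range(len(hyp) + 1):
        dp[0][j] = j  # insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # deletion
                           dp[i][j - 1] + 1,         # insertion
                           dp[i - 1][j - 1] + cost)  # substitution / match
    return dp[-1][-1], len(ref)

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
errors = sum(word_errors(r, p)[0] for r, p in zip(references, predictions))
total = sum(word_errors(r, p)[1] for r, p in zip(references, predictions))
print(errors / total)  # 0.5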
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = (EulerDiscreteScheduler,)
_lowerCamelCase = 10
def UpperCAmelCase__ ( self , **_lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_lowercase )
return config
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_lowercase )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowercase )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : Any = self.get_scheduler_config()
snake_case_ : int = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ : Dict = torch.manual_seed(0 )
snake_case_ : int = self.dummy_model()
snake_case_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : Optional[Any] = sample.to(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : Union[str, Any] = scheduler.scale_model_input(_lowercase , _lowercase )
snake_case_ : Tuple = model(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase )
snake_case_ : List[Any] = output.prev_sample
snake_case_ : List[Any] = torch.sum(torch.abs(_lowercase ) )
snake_case_ : str = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
snake_case_ : Optional[Any] = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ : Optional[int] = torch.manual_seed(0 )
snake_case_ : Union[str, Any] = self.dummy_model()
snake_case_ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : Union[str, Any] = sample.to(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : Dict = scheduler.scale_model_input(_lowercase , _lowercase )
snake_case_ : Any = model(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase )
snake_case_ : Optional[Any] = output.prev_sample
snake_case_ : int = torch.sum(torch.abs(_lowercase ) )
snake_case_ : Tuple = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.scheduler_classes[0]
snake_case_ : Optional[int] = self.get_scheduler_config()
snake_case_ : str = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowercase )
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Tuple = self.dummy_model()
snake_case_ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case_ : int = sample.to(_lowercase )
for t in scheduler.timesteps:
snake_case_ : Union[str, Any] = scheduler.scale_model_input(_lowercase , _lowercase )
snake_case_ : Dict = model(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase )
snake_case_ : Union[str, Any] = output.prev_sample
snake_case_ : Any = torch.sum(torch.abs(_lowercase ) )
snake_case_ : Tuple = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = self.scheduler_classes[0]
snake_case_ : List[Any] = self.get_scheduler_config()
snake_case_ : Tuple = scheduler_class(**_lowercase , use_karras_sigmas=_lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowercase )
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Union[str, Any] = self.dummy_model()
snake_case_ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case_ : List[str] = sample.to(_lowercase )
for t in scheduler.timesteps:
snake_case_ : Dict = scheduler.scale_model_input(_lowercase , _lowercase )
snake_case_ : str = model(_lowercase , _lowercase )
snake_case_ : int = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase )
snake_case_ : Any = output.prev_sample
snake_case_ : Dict = torch.sum(torch.abs(_lowercase ) )
snake_case_ : Union[str, Any] = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
| 58 |
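Each test above exercises the same three-call denoising contract: scale_model_input for the current sigma, predict noise, then step. Stripped of the assertions, the loop looks like this (the constant-zero "model" is a placeholder; a real pipeline would call a trained UNet):

import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

def model(x, t):
    # placeholder noise predictor standing in for a trained UNet
    return torch.zeros_like(x)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma  # start from scaled noise
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample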
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=3 , _lowercase=2_2_4 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Union[str, Any] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : Optional[Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Union[str, Any] = do_resize
snake_case_ : Optional[int] = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : Dict = image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 58 | 1 |
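Outside the test harness, the processor boils down to two calls; a small usage sketch mirroring the 18x18 tester defaults above (the random image is just a stand-in):

import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor(
    do_resize=True, size={"height": 18, "width": 18},
    do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
)
image = Image.fromarray(np.random.randint(0, 256, (32, 48, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])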
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : Dict = tau * frequency / samplerate
snake_case_ : List[Any] = sin(__UpperCamelCase )
snake_case_ : List[str] = cos(__UpperCamelCase )
snake_case_ : List[Any] = _sin / (2 * q_factor)
snake_case_ : List[Any] = (1 - _cos) / 2
snake_case_ : Any = 1 - _cos
snake_case_ : Union[str, Any] = 1 + alpha
snake_case_ : Tuple = -2 * _cos
snake_case_ : str = 1 - alpha
snake_case_ : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : Optional[Any] = tau * frequency / samplerate
snake_case_ : Dict = sin(__UpperCamelCase )
snake_case_ : Union[str, Any] = cos(__UpperCamelCase )
snake_case_ : Optional[int] = _sin / (2 * q_factor)
snake_case_ : List[Any] = (1 + _cos) / 2
snake_case_ : Union[str, Any] = -1 - _cos
snake_case_ : Any = 1 + alpha
snake_case_ : Any = -2 * _cos
snake_case_ : List[Any] = 1 - alpha
snake_case_ : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tau * frequency / samplerate
snake_case_ : Optional[int] = sin(__UpperCamelCase )
snake_case_ : Dict = cos(__UpperCamelCase )
snake_case_ : List[Any] = _sin / (2 * q_factor)
snake_case_ : str = _sin / 2
snake_case_ : Any = 0
snake_case_ : List[Any] = -ba
snake_case_ : List[Any] = 1 + alpha
snake_case_ : List[str] = -2 * _cos
snake_case_ : Optional[int] = 1 - alpha
snake_case_ : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : Tuple = tau * frequency / samplerate
snake_case_ : Union[str, Any] = sin(__UpperCamelCase )
snake_case_ : Union[str, Any] = cos(__UpperCamelCase )
snake_case_ : List[str] = _sin / (2 * q_factor)
snake_case_ : str = 1 - alpha
snake_case_ : Union[str, Any] = -2 * _cos
snake_case_ : int = 1 + alpha
snake_case_ : Tuple = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : str = tau * frequency / samplerate
snake_case_ : str = sin(__UpperCamelCase )
snake_case_ : Union[str, Any] = cos(__UpperCamelCase )
snake_case_ : List[str] = _sin / (2 * q_factor)
snake_case_ : Tuple = 1_0 ** (gain_db / 4_0)
snake_case_ : List[Any] = 1 + alpha * big_a
snake_case_ : Optional[int] = -2 * _cos
snake_case_ : Dict = 1 - alpha * big_a
snake_case_ : List[Any] = 1 + alpha / big_a
snake_case_ : Union[str, Any] = -2 * _cos
snake_case_ : Any = 1 - alpha / big_a
snake_case_ : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : Optional[int] = tau * frequency / samplerate
snake_case_ : Optional[int] = sin(__UpperCamelCase )
snake_case_ : List[str] = cos(__UpperCamelCase )
snake_case_ : Optional[Any] = _sin / (2 * q_factor)
snake_case_ : Tuple = 1_0 ** (gain_db / 4_0)
snake_case_ : Tuple = (big_a + 1) - (big_a - 1) * _cos
snake_case_ : Any = (big_a + 1) + (big_a - 1) * _cos
snake_case_ : List[str] = (big_a - 1) - (big_a + 1) * _cos
snake_case_ : List[Any] = (big_a - 1) + (big_a + 1) * _cos
snake_case_ : Optional[Any] = 2 * sqrt(__UpperCamelCase ) * alpha
snake_case_ : Optional[int] = big_a * (pmc + aaa)
snake_case_ : int = 2 * big_a * mpc
snake_case_ : List[Any] = big_a * (pmc - aaa)
snake_case_ : Optional[Any] = ppmc + aaa
snake_case_ : List[Any] = -2 * pmpc
snake_case_ : Optional[Any] = ppmc - aaa
snake_case_ : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : List[str] = tau * frequency / samplerate
snake_case_ : List[str] = sin(__UpperCamelCase )
snake_case_ : List[str] = cos(__UpperCamelCase )
snake_case_ : Any = _sin / (2 * q_factor)
snake_case_ : Optional[int] = 1_0 ** (gain_db / 4_0)
snake_case_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
snake_case_ : Dict = (big_a + 1) + (big_a - 1) * _cos
snake_case_ : int = (big_a - 1) - (big_a + 1) * _cos
snake_case_ : Tuple = (big_a - 1) + (big_a + 1) * _cos
snake_case_ : str = 2 * sqrt(__UpperCamelCase ) * alpha
snake_case_ : str = big_a * (ppmc + aaa)
snake_case_ : List[Any] = -2 * big_a * pmpc
snake_case_ : Union[str, Any] = big_a * (ppmc - aaa)
snake_case_ : Any = pmc + aaa
snake_case_ : Dict = 2 * mpc
snake_case_ : List[Any] = pmc - aaa
snake_case_ : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 58 |
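Judging by the coefficient patterns, the seven factories above are the usual make_lowpass, make_highpass, make_bandpass, make_allpass, make_peak, make_lowshelf and make_highshelf biquad designs, and IIRFilter is assumed to expose a per-sample process() method, as in the module it is imported from. Under those assumptions, a usage sketch:

from math import sin, tau

samplerate = 48_000
filt = make_lowpass(1_000, samplerate)  # 2nd-order low-pass with a 1 kHz cutoff

# Feed a 10 kHz tone through the filter one sample at a time; a decade above
# the cutoff, a 2nd-order low-pass attenuates by roughly 40 dB.
tone = [sin(tau * 10_000 * n / samplerate) for n in range(480)]
out = [filt.process(s) for s in tone]
print(max(abs(s) for s in out[240:]))  # small residual once the filter settles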
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
snake_case_ : List[Any] = max((len(str(_lowercase ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20
    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)
    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
main()
| 58 | 1 |
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield successive Fibonacci numbers starting from 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain ``n`` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 58 |
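A quick check of the solution above: 144 = F(12) is the first Fibonacci term with three digits, so with the functions above in scope:

>>> solution(3)
12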
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the main docs with stable-release links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 58 | 1 |
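A standalone sanity check of the `init` replacement pattern used above (the sample version strings are illustrative):

import re

init_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
code = '__version__ = "0.19.0.dev0"\n'
print(init_pattern.search(code).groups()[0])             # 0.19.0.dev0
print(init_pattern.sub('__version__ = "0.19.0"', code))  # the rewritten line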
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    """Image processor that resizes to a multiple of ``size_divisor`` and rescales to [0, 1].

    The class name is reconstructed from the structure of the code; the logic matches the
    mangled original line for line.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, do_rescale=None, size_divisor=None, resample=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 58 |
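The resize step above rounds each side down to the nearest multiple of size_divisor; a quick numeric illustration:

size_divisor = 32
for height, width in [(480, 640), (500, 333)]:
    print(height // size_divisor * size_divisor, width // size_divisor * size_divisor)
# -> 480 640 and 480 320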
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace relation: c = sqrt(K / rho) for bulk modulus K and density rho."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
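    # Worked example (reference values for water are approximate, not from the source):
    # density ~998 kg/m^3, bulk modulus ~2.15e9 Pa -> roughly 1.47e3 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))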
| 58 | 1 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to ``precision`` digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    # The series gains roughly 14 digits of precision per iteration.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 58 | 1 |
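With the pi function above in scope, a low-precision call makes a quick sanity check (the exact trailing digit depends on Decimal rounding at the chosen precision):

print(pi(10))  # expected to start with 3.14159265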
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 58 |
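A usage sketch of the task template above; the import path is an assumption that may vary across datasets versions, and the column names are illustrative:

from datasets.tasks import AutomaticSpeechRecognition  # assumed location of the class above

task = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
print(task.column_mapping)  # {'file': 'audio', 'text': 'transcription'}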
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Shannon entropy of softmax(x), computed directly from pre-softmax logits."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = config.output_attentions
snake_case_ : str = config.output_hidden_states
snake_case_ : List[str] = nn.ModuleList([BertLayer(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Tuple = nn.ModuleList([BertHighway(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
if (type(_lowercase ) is float) or (type(_lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ : Dict = x
else:
snake_case_ : Union[str, Any] = x
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Any:
'''simple docstring'''
snake_case_ : str = ()
snake_case_ : str = ()
snake_case_ : List[str] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ : int = all_hidden_states + (hidden_states,)
snake_case_ : Any = layer_module(
_lowercase , _lowercase , head_mask[i] , _lowercase , _lowercase )
snake_case_ : Dict = layer_outputs[0]
if self.output_attentions:
snake_case_ : str = all_attentions + (layer_outputs[1],)
snake_case_ : Optional[int] = (hidden_states,)
if self.output_hidden_states:
snake_case_ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : int = current_outputs + (all_attentions,)
snake_case_ : Optional[Any] = self.highway[i](_lowercase )
# logits, pooled_output
if not self.training:
snake_case_ : Tuple = highway_exit[0]
snake_case_ : List[str] = entropy(_lowercase )
snake_case_ : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_lowercase , i + 1 )
else:
snake_case_ : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ : Dict = all_hidden_states + (hidden_states,)
snake_case_ : str = (hidden_states,)
if self.output_hidden_states:
snake_case_ : List[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Union[str, Any] = outputs + (all_attentions,)
snake_case_ : List[str] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config
snake_case_ : int = BertEmbeddings(_lowercase )
snake_case_ : Tuple = DeeBertEncoder(_lowercase )
snake_case_ : int = BertPooler(_lowercase )
self.init_weights()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = value
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_lowercase )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
snake_case_ : Dict = input_ids.size()
elif inputs_embeds is not None:
snake_case_ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
snake_case_ : int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ : Dict = torch.ones(_lowercase , device=_lowercase )
if encoder_attention_mask is None:
snake_case_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
snake_case_ : Any = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ : Any = encoder_attention_mask[:, None, None, :]
snake_case_ : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ : List[str] = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ : int = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
snake_case_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
snake_case_ : List[str] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
snake_case_ : Optional[Any] = encoder_outputs[0]
snake_case_ : Union[str, Any] = self.pooler(_lowercase )
snake_case_ : Optional[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = message
snake_case_ : str = exit_layer # start from 1!
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : str = BertPooler(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Dict = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = encoder_outputs[0]
snake_case_ : List[Any] = self.pooler(_lowercase )
# "return" pooler_output
# BertModel
snake_case_ : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ : Union[str, Any] = bmodel_output[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : List[str] = self.classifier(_lowercase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : Tuple = config.num_hidden_layers
snake_case_ : Any = DeeBertModel(_lowercase )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> int:
'''simple docstring'''
snake_case_ : int = self.num_layers
try:
snake_case_ : Any = self.bert(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ : str = outputs[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Optional[int] = e.message
snake_case_ : Dict = e.exit_layer
snake_case_ : Optional[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(_lowercase )
snake_case_ : int = []
snake_case_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : List[Any] = MSELoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : str = (loss,) + outputs
if not self.training:
snake_case_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 58 | 1 |
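The entropy helper above drives DeeBERT's early-exit decision. A self-contained check (requires torch; tensors are illustrative) - uniform logits give the maximal entropy ln K, confident logits give near zero:

import torch

def entropy(x):  # same formula as in the snippet above
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)
    B = torch.sum(x * exp_x, dim=1)
    return torch.log(A) - B / A

print(entropy(torch.zeros(1, 5)).item())                   # ~1.609 == ln 5
print(entropy(torch.tensor([[10.0, 0, 0, 0, 0]])).item())  # ~0.002, nearly certain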
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowerCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase , _lowercase = None , _lowercase = None ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[int] = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
snake_case_ : Any = torch.zeros(_lowercase , _lowercase )
else:
snake_case_ : List[str] = None
snake_case_ : Any = torch.nn.Parameter(_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=_lowercase , transformer=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = len(_lowercase ) if isinstance(_lowercase , _lowercase ) else 1
# get prompt text embeddings
snake_case_ : List[str] = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case_ : List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
snake_case_ : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case_ : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
snake_case_ : int = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_lowercase )
# duplicate text embeddings for each generation per prompt
snake_case_ : List[str] = prompt_embeds.repeat_interleave(_lowercase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
snake_case_ : Any = self.learned_classifier_free_sampling_embeddings.embeddings
snake_case_ : Tuple = negative_prompt_embeds.unsqueeze(0 ).repeat(_lowercase , 1 , 1 )
else:
snake_case_ : Union[str, Any] = [""""""] * batch_size
snake_case_ : Optional[Any] = text_input_ids.shape[-1]
snake_case_ : str = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=_lowercase , truncation=_lowercase , return_tensors="""pt""" , )
snake_case_ : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
snake_case_ : List[str] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_lowercase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ : Optional[Any] = negative_prompt_embeds.shape[1]
snake_case_ : Optional[Any] = negative_prompt_embeds.repeat(1 , _lowercase , 1 )
snake_case_ : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : List[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , _lowercase , _lowercase = 1_0_0 , _lowercase = 5.0 , _lowercase = 1.0 , _lowercase = 1 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(_lowercase , _lowercase ):
snake_case_ : Dict = 1
elif isinstance(_lowercase , _lowercase ):
snake_case_ : Dict = len(_lowercase )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(_lowercase )}' )
snake_case_ : str = batch_size * num_images_per_prompt
snake_case_ : str = guidance_scale > 1.0
snake_case_ : Optional[Any] = self._encode_prompt(_lowercase , _lowercase , _lowercase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(_lowercase )}.' )
# get the initial completely masked latents unless the user supplied it
snake_case_ : str = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
snake_case_ : str = self.transformer.num_vector_embeds - 1
snake_case_ : List[Any] = torch.full(_lowercase , _lowercase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
snake_case_ : Any = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowercase , device=self.device )
snake_case_ : List[str] = self.scheduler.timesteps.to(self.device )
snake_case_ : List[str] = latents
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the sample if we are doing classifier free guidance
snake_case_ : Optional[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
snake_case_ : Dict = self.transformer(_lowercase , encoder_hidden_states=_lowercase , timestep=_lowercase ).sample
if do_classifier_free_guidance:
snake_case_ , snake_case_ : List[str] = model_output.chunk(2 )
snake_case_ : Optional[int] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_lowercase , dim=1 , keepdim=_lowercase )
snake_case_ : Tuple = self.truncate(_lowercase , _lowercase )
# remove `log(0)`'s (`-inf`s)
snake_case_ : List[str] = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : Tuple = self.scheduler.step(_lowercase , timestep=_lowercase , sample=_lowercase , generator=_lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowercase , _lowercase , _lowercase )
snake_case_ : Any = self.vqvae.config.vq_embed_dim
snake_case_ : Tuple = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
snake_case_ : Dict = self.vqvae.quantize.get_codebook_entry(_lowercase , shape=_lowercase )
snake_case_ : Any = self.vqvae.decode(_lowercase , force_not_quantize=_lowercase ).sample
snake_case_ : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Tuple = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> torch.FloatTensor:
'''simple docstring'''
snake_case_ , snake_case_ : Union[str, Any] = torch.sort(_lowercase , 1 , descending=_lowercase )
snake_case_ : Optional[Any] = torch.exp(_lowercase )
snake_case_ : Dict = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
snake_case_ : Optional[int] = torch.full_like(keep_mask[:, 0:1, :] , _lowercase )
snake_case_ : int = torch.cat((all_true, keep_mask) , dim=1 )
snake_case_ : Optional[Any] = keep_mask[:, :-1, :]
snake_case_ : List[Any] = keep_mask.gather(1 , indices.argsort(1 ) )
snake_case_ : str = log_p_x_0.clone()
snake_case_ : str = -torch.inf # -inf = log(0)
return rv
| 58 |
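The classifier-free guidance step in the pipeline above combines unconditional and text-conditioned predictions; a minimal numeric sketch of that combination (illustrative values, requires torch):

import torch

guidance_scale = 5.0
model_output_uncond = torch.tensor([0.1, 0.2])
model_output_text = torch.tensor([0.3, 0.1])
guided = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
print(guided)  # tensor([ 1.1000, -0.3000])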
"""simple docstring"""
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 58 | 1 |
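One hand-checked gradient step of the update rule above on a two-point toy dataset (values chosen for easy arithmetic):

import numpy as np

x = np.array([[1.0], [-1.0]])  # two samples, one feature
y = np.array([1, 0])
theta = np.zeros(1)
h = 1 / (1 + np.exp(-x.dot(theta)))  # both predictions start at 0.5
gradient = x.T.dot(h - y) / y.size   # (1*(-0.5) + (-1)*0.5) / 2 = -0.5
theta = theta - 0.1 * gradient
print(theta)  # [0.05] - the weight moves toward separating the classes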
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 58 |
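The _LazyModule machinery above defers heavy imports until attribute access. A minimal standalone sketch of the same idea using module-level __getattr__ (PEP 562) - an illustration, not transformers' actual implementation:

import importlib

_import_structure = {"math": ["sqrt"]}  # submodule -> exported names, mirroring the dict above

def __getattr__(name):  # only consulted when normal lookup fails; save as a module to try it
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")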
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['''dtype''']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase = None , _lowercase = None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.load_config(
pretrained_model_name_or_path=_lowercase , subfolder=_lowercase , return_unused_kwargs=_lowercase , **_lowercase , )
snake_case_ , snake_case_ : Dict = cls.from_config(_lowercase , return_unused_kwargs=_lowercase , **_lowercase )
if hasattr(_lowercase , """create_state""" ) and getattr(_lowercase , """has_state""" , _lowercase ):
snake_case_ : Any = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
self.save_config(save_directory=_lowercase , push_to_hub=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case_ : str = importlib.import_module(__name__.split(""".""" )[0] )
snake_case_ : Optional[int] = [
getattr(_lowercase , _lowercase ) for c in compatible_classes_str if hasattr(_lowercase , _lowercase )
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Create a beta schedule that discretizes the given cosine alpha_bar function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 58 | 1 |
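A plain-NumPy mirror of betas_for_alpha_bar above, handy for eyeballing the cosine schedule without JAX installed (T = 10 is arbitrary):

import math
import numpy as np

def alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

T = 10
betas = [min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), 0.999) for i in range(T)]
print(np.round(betas, 4))  # betas grow from near 0 toward the end of the schedule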
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match a contiguous window of the strings in ks."""
    # Compile the patterns and force a complete match with a trailing `$`.
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 58 |
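A standalone check of the windowed regex matching implemented by _match above (the key tuples are illustrative):

import re

def _match(qs, ks):  # same logic as above
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False

print(_match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel")))  # True
print(_match(("attention", "out_proj"), ("mlp", "c_proj", "bias")))  # False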
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 58 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ), F'The input value of [n={number}] is not an integer'
if number == 1:
return 2
elif number < 1:
snake_case_ : Union[str, Any] = F'The input value of [n={number}] has to be > 0'
raise ValueError(__UpperCamelCase )
else:
snake_case_ : Tuple = sylvester(number - 1 )
snake_case_ : Any = num - 1
snake_case_ : Optional[Any] = num
return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
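For reference, the recurrence implemented above is Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1, which is exactly what the lower * upper + 1 step computes. A minimal iterative sketch under an illustrative name:

def sylvester_ref(n: int) -> int:
    # Sylvester's sequence: a(1) = 2, a(n) = a(n - 1) ** 2 - a(n - 1) + 1.
    if not isinstance(n, int) or n < 1:
        raise ValueError(f"The input value of [n={n}] has to be a positive integer")
    value = 2
    for _ in range(n - 1):
        value = value * value - value + 1
    return value

assert [sylvester_ref(i) for i in range(1, 5)] == [2, 3, 7, 43]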
| 58 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[int] ):
'''simple docstring'''
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : list[int] , __UpperCamelCase : int ):
'''simple docstring'''
if curr_ind == len(__UpperCamelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__UpperCamelCase ) ):
if valid_connection(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
# Insert current vertex into path as next transition
snake_case_ : List[str] = next_ver
# Validate created path
if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , curr_ind + 1 ):
return True
# Backtrack
snake_case_ : Tuple = -1
return False
def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int = 0 ):
'''simple docstring'''
snake_case_ : Tuple = [-1] * (len(__UpperCamelCase ) + 1)
# initialize start and end of path with starting index
snake_case_ : Optional[int] = start_index
# evaluate the path: return it if a Hamiltonian cycle exists, otherwise return an empty list
return path if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , 1 ) else []
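A usage sketch for the backtracking above, assuming the name-mangled helpers correspond to the conventional valid_connection, util_hamilton_cycle and hamilton_cycle; the 5-vertex adjacency matrix is illustrative:

# Hypothetical name: hamilton_cycle is the last function defined above.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # -> [0, 1, 2, 4, 3, 0]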
| 58 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError("""iterations must be defined as integers""" )
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not number >= 1:
raise ValueError("""starting number must be an integer greater than 0""" )
if not iterations >= 1:
raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
snake_case_ : Optional[Any] = """"""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__UpperCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
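A hedged usage sketch, assuming the name-mangled definition above is exposed as fizz_buzz(number, iterations):

out = fizz_buzz(1, 15)  # hypothetical name for the function above
print(out)  # 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz
assert out.split()[-1] == "FizzBuzz"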
| 58 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''BlipImageProcessor'''
_lowerCamelCase = '''AutoTokenizer'''
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__(_lowercase , _lowercase )
# add QFormer tokenizer
snake_case_ : List[str] = qformer_tokenizer
def __call__( self , _lowercase = None , _lowercase = None , _lowercase = True , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = True , _lowercase = None , **_lowercase , ) -> BatchFeature:
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
snake_case_ : Optional[Any] = BatchFeature()
if text is not None:
snake_case_ : List[str] = self.tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
encoding.update(_lowercase )
snake_case_ : Union[str, Any] = self.qformer_tokenizer(
text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , )
snake_case_ : List[str] = qformer_text_encoding.pop("""input_ids""" )
snake_case_ : Union[str, Any] = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
snake_case_ : Tuple = self.image_processor(_lowercase , return_tensors=_lowercase )
encoding.update(_lowercase )
return encoding
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer.model_input_names
snake_case_ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase__ ( self , _lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
if os.path.isfile(_lowercase ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(_lowercase , exist_ok=_lowercase )
snake_case_ : int = os.path.join(_lowercase , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(_lowercase )
return super().save_pretrained(_lowercase , **_lowercase )
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> int:
'''simple docstring'''
snake_case_ : List[str] = AutoTokenizer.from_pretrained(_lowercase , subfolder="""qformer_tokenizer""" )
snake_case_ : Union[str, Any] = cls._get_arguments_from_pretrained(_lowercase , **_lowercase )
args.append(_lowercase )
return cls(*_lowercase )
| 58 | 1 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : str , **__UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
snake_case_ : str = AutoModelForSeqaSeqLM.from_config(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
AutoTokenizer.from_pretrained(__UpperCamelCase ).save_pretrained(__UpperCamelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 58 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase : List[Any] = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
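For context, the _LazyModule assignment above defers the heavy torch and vision imports until an attribute is first accessed. A minimal sketch of the same idea using a module-level __getattr__ (PEP 562); the names are illustrative, not transformers internals:

import importlib

_import_structure = {"modeling": ["MyModel"]}  # assumed example layout

def __getattr__(name):
    # Resolve the attribute lazily on first access, then cache it in globals().
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            value = getattr(module, name)
            globals()[name] = value
            return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")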
| 58 | 1 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Dict=5 ):
'''simple docstring'''
assert masked_input.count("""<mask>""" ) == 1
snake_case_ : List[str] = torch.tensor(tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) ).unsqueeze(0 ) # Batch size 1
snake_case_ : List[str] = model(__UpperCamelCase )[0] # The last hidden-state is the first element of the output tuple
snake_case_ : List[Any] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
snake_case_ : List[str] = logits[0, masked_index, :]
snake_case_ : Any = logits.softmax(dim=0 )
snake_case_ , snake_case_ : str = prob.topk(k=__UpperCamelCase , dim=0 )
snake_case_ : Optional[Any] = """ """.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__UpperCamelCase ) )] )
snake_case_ : Union[str, Any] = tokenizer.mask_token
snake_case_ : Optional[int] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ):
snake_case_ : Dict = predicted_token_bpe.replace("""\u2581""" , """ """ )
if " {0}".format(__UpperCamelCase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(""" {0}""".format(__UpperCamelCase ) , __UpperCamelCase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(__UpperCamelCase , __UpperCamelCase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
__lowerCAmelCase : Dict = CamembertTokenizer.from_pretrained('''camembert-base''')
__lowerCAmelCase : List[str] = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
__lowerCAmelCase : str = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 58 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[str] = WavaVecaForSequenceClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : int = downstream_dict["""projector.weight"""]
snake_case_ : Optional[int] = downstream_dict["""projector.bias"""]
snake_case_ : List[Any] = downstream_dict["""model.post_net.linear.weight"""]
snake_case_ : Union[str, Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = WavaVecaForAudioFrameClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""model.linear.weight"""]
snake_case_ : int = downstream_dict["""model.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = WavaVecaForXVector.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""connector.weight"""]
snake_case_ : str = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case_ : Dict = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
snake_case_ : int = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
snake_case_ : str = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
snake_case_ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
snake_case_ : List[str] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Any = torch.load(__UpperCamelCase , map_location="""cpu""" )
snake_case_ : Any = checkpoint["""Downstream"""]
snake_case_ : Optional[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
__UpperCamelCase , return_attention_mask=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case_ : Optional[Any] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case_ : Tuple = convert_classification(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case_ : Union[str, Any] = convert_diarization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForXVector""" ):
snake_case_ : List[str] = convert_xvector(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
snake_case_ : List[Any] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__lowerCAmelCase : Dict = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 58 | 1 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = None
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , _lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : str = os.path.join(_lowercase , """feat_extract.json""" )
feat_extract_first.to_json_file(_lowercase )
snake_case_ : str = self.feature_extraction_class.from_json_file(_lowercase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Tuple = feat_extract_first.save_pretrained(_lowercase )[0]
check_json_file_has_correct_format(_lowercase )
snake_case_ : Dict = self.feature_extraction_class.from_pretrained(_lowercase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : int = self.feature_extraction_class()
self.assertIsNotNone(_lowercase )
| 58 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : int = {'''vocab_file''': '''vocab.txt'''}
__lowerCAmelCase : Union[str, Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__lowerCAmelCase : Optional[Any] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__lowerCAmelCase : Any = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ConvBertTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase="[UNK]" , _lowercase="[SEP]" , _lowercase="[PAD]" , _lowercase="[CLS]" , _lowercase="[MASK]" , _lowercase=True , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
snake_case_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _lowercase ) != tokenize_chinese_chars
):
snake_case_ : Optional[int] = getattr(_lowercase , normalizer_state.pop("""type""" ) )
snake_case_ : Dict = do_lower_case
snake_case_ : str = strip_accents
snake_case_ : Optional[Any] = tokenize_chinese_chars
snake_case_ : int = normalizer_class(**_lowercase )
snake_case_ : Optional[int] = do_lower_case
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> int:
'''simple docstring'''
snake_case_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : int = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 58 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
snake_case_ : Any = 1
snake_case_ : Optional[int] = 1
while repunit:
snake_case_ : Optional[int] = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0_0_0_0_0_0 ):
'''simple docstring'''
snake_case_ : Tuple = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(__UpperCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
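This is the repunit-divisibility computation (in the style of Project Euler problem 129): A(d) is the least k such that d divides the repunit R(k) = (10**k - 1) / 9, and the loop above tracks R(k) mod d incrementally via R(k+1) = 10 * R(k) + 1. A minimal sketch of the first helper under an illustrative name:

def least_repunit_length(divisor: int) -> int:
    # Least k with divisor | R(k); only defined when gcd(divisor, 10) == 1.
    if divisor % 2 == 0 or divisor % 5 == 0:
        return 0
    repunit, k = 1, 1
    while repunit % divisor != 0:
        repunit = (10 * repunit + 1) % divisor
        k += 1
    return k

assert least_repunit_length(3) == 3   # 111 = 3 * 37
assert least_repunit_length(7) == 6   # 111111 = 7 * 15873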
| 58 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase = 1_2_8 , _lowercase = 2_5_6 , _lowercase = 2000.0 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 6_4 , _lowercase = 2_0_4_8 , _lowercase = 0.1 , ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = nn.Sequential(
nn.Linear(_lowercase , d_model * 4 , bias=_lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowercase ) , nn.SiLU() , )
snake_case_ : Any = nn.Embedding(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = False
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Union[str, Any] = nn.Dropout(p=_lowercase )
snake_case_ : Tuple = nn.ModuleList()
for lyr_num in range(_lowercase ):
# FiLM conditional T5 decoder
snake_case_ : Union[str, Any] = DecoderLayer(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
self.decoders.append(_lowercase )
snake_case_ : List[Any] = TaLayerNorm(_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(p=_lowercase )
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
snake_case_ : Optional[int] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
snake_case_ : int = self.conditioning_emb(_lowercase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
snake_case_ : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
snake_case_ : Dict = torch.broadcast_to(
torch.arange(_lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
snake_case_ : Tuple = self.position_encoding(_lowercase )
snake_case_ : Optional[Any] = self.continuous_inputs_projection(_lowercase )
inputs += position_encodings
snake_case_ : List[Any] = self.dropout(_lowercase )
# decoder: No padding present.
snake_case_ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
snake_case_ : int = [(x, self.encoder_decoder_mask(_lowercase , _lowercase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
snake_case_ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
snake_case_ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
snake_case_ : int = lyr(
_lowercase , conditioning_emb=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )[0]
snake_case_ : int = self.decoder_norm(_lowercase )
snake_case_ : Union[str, Any] = self.post_dropout(_lowercase )
snake_case_ : int = self.spec_out(_lowercase )
return spec_out
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=1E-6 ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case_ : Any = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase ) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.layer[0](
_lowercase , conditioning_emb=_lowercase , attention_mask=_lowercase , )
if encoder_hidden_states is not None:
snake_case_ : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
snake_case_ : str = self.layer[1](
_lowercase , key_value_states=_lowercase , attention_mask=_lowercase , )
# Apply Film Conditional Feed Forward layer
snake_case_ : Any = self.layer[-1](_lowercase , _lowercase )
return (hidden_states,)
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Any = TaLayerNorm(_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Union[str, Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : List[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : str = self.FiLMLayer(_lowercase , _lowercase )
# Self-attention block
snake_case_ : List[Any] = self.attention(_lowercase )
snake_case_ : List[str] = hidden_states + self.dropout(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : Union[str, Any] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
snake_case_ : Optional[Any] = self.attention(
_lowercase , encoder_hidden_states=_lowercase , attention_mask=attention_mask.squeeze(1 ) , )
snake_case_ : Any = hidden_states + self.dropout(_lowercase )
return layer_output
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = TaDenseGatedActDense(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Optional[int] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Tuple = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : Optional[int] = self.film(_lowercase , _lowercase )
snake_case_ : int = self.DenseReluDense(_lowercase )
snake_case_ : Optional[Any] = hidden_states + self.dropout(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : int = nn.Dropout(_lowercase )
snake_case_ : Optional[int] = NewGELUActivation()
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : str = self.act(self.wi_a(_lowercase ) )
snake_case_ : Dict = self.wi_a(_lowercase )
snake_case_ : Any = hidden_gelu * hidden_linear
snake_case_ : List[Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.wo(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1E-6 ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Union[str, Any] = nn.Parameter(torch.ones(_lowercase ) )
snake_case_ : int = eps
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_lowercase )
snake_case_ : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
snake_case_ : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase ) -> torch.Tensor:
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(_lowercase , 3.0 )) ))
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = nn.Linear(_lowercase , out_features * 2 , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.scale_bias(_lowercase )
snake_case_ , snake_case_ : Any = torch.chunk(_lowercase , 2 , -1 )
snake_case_ : Optional[Any] = x * (1 + scale) + shift
return x
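The last class above is a FiLM (feature-wise linear modulation) layer: a conditioning embedding is projected to a per-channel scale and shift, applied as x * (1 + scale) + shift. A standalone sketch under assumed shapes:

import torch
from torch import nn

class FiLMSketch(nn.Module):
    """Illustrative FiLM layer: one linear projection yields (scale, shift)."""

    def __init__(self, cond_features: int, out_features: int) -> None:
        super().__init__()
        self.scale_bias = nn.Linear(cond_features, out_features * 2, bias=False)

    def forward(self, x: torch.Tensor, conditioning: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(conditioning), 2, dim=-1)
        return x * (1 + scale) + shift

film = FiLMSketch(cond_features=8, out_features=4)
out = film(torch.randn(2, 4), torch.randn(2, 8))  # -> shape (2, 4)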
| 58 | 1 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__lowerCAmelCase : Dict = logging.get_logger(__name__)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = question_encoder
snake_case_ : List[str] = generator
snake_case_ : Optional[Any] = self.question_encoder
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
if os.path.isfile(_lowercase ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(_lowercase , exist_ok=_lowercase )
snake_case_ : Optional[Any] = os.path.join(_lowercase , """question_encoder_tokenizer""" )
snake_case_ : List[str] = os.path.join(_lowercase , """generator_tokenizer""" )
self.question_encoder.save_pretrained(_lowercase )
self.generator.save_pretrained(_lowercase )
@classmethod
def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case_ : Any = kwargs.pop("""config""" , _lowercase )
if config is None:
snake_case_ : int = RagConfig.from_pretrained(_lowercase )
snake_case_ : str = AutoTokenizer.from_pretrained(
_lowercase , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained(
_lowercase , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=_lowercase , generator=_lowercase )
def __call__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
return self.current_tokenizer(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
return self.generator.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
return self.generator.decode(*_lowercase , **_lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.question_encoder
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = self.generator
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = "longest" , _lowercase = None , _lowercase = True , **_lowercase , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , _lowercase , )
if max_length is None:
snake_case_ : List[Any] = self.current_tokenizer.model_max_length
snake_case_ : List[Any] = self(
_lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , max_length=_lowercase , padding=_lowercase , truncation=_lowercase , **_lowercase , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case_ : str = self.current_tokenizer.model_max_length
snake_case_ : Any = self(
text_target=_lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , padding=_lowercase , max_length=_lowercase , truncation=_lowercase , **_lowercase , )
snake_case_ : str = labels["""input_ids"""]
return model_inputs
| 58 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''roformer'''
def __init__( self , _lowercase=5_0_0_0_0 , _lowercase=None , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_5_3_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=0 , _lowercase=False , _lowercase=True , **_lowercase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : str = vocab_size
snake_case_ : Any = hidden_size if embedding_size is None else embedding_size
snake_case_ : List[str] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : str = layer_norm_eps
snake_case_ : List[str] = rotary_value
snake_case_ : str = use_cache
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
snake_case_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 58 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionInpaintPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase = frozenset([] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
snake_case_ : Dict = PNDMScheduler(skip_prk_steps=_lowercase )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case_ : Dict = CLIPTextModel(_lowercase )
snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowercase ) ).to(_lowercase )
snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Any = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : str = torch.manual_seed(_lowercase )
else:
snake_case_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline(**_lowercase )
snake_case_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = self.get_dummy_inputs(_lowercase )
snake_case_ : List[str] = sd_pipe(**_lowercase ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Optional[int] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Dict = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , torch_dtype=torch.floataa , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : List[str] = PNDMScheduler.from_pretrained(_lowercase , subfolder="""scheduler""" )
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , scheduler=_lowercase , torch_dtype=torch.floataa , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : Any = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 58 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Dict = checkpoints.load_tax_checkpoint(__UpperCamelCase )
snake_case_ : Tuple = flatten_dict(__UpperCamelCase )
return flax_params
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : List[Any] = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
snake_case_ : Optional[Any] = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
snake_case_ : List[Any] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
snake_case_ : List[str] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
snake_case_ : Optional[int] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
snake_case_ : Optional[Any] = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Union[str, Any] = new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
snake_case_ : int = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Dict = flax_dict[key]
snake_case_ : Tuple = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
snake_case_ : Optional[int] = torch.from_numpy(converted_dict[key].T )
else:
snake_case_ : List[Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[str]=False ):
'''simple docstring'''
snake_case_ : Optional[int] = get_flax_param(__UpperCamelCase )
if not use_large:
snake_case_ : Optional[int] = PixaStructVisionConfig()
snake_case_ : Optional[Any] = PixaStructTextConfig()
else:
snake_case_ : Tuple = PixaStructVisionConfig(
hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
snake_case_ : List[str] = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
snake_case_ : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCamelCase )
snake_case_ : Optional[int] = PixaStructForConditionalGeneration(__UpperCamelCase )
snake_case_ : str = rename_and_convert_flax_params(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
snake_case_ : int = PixaStructImageProcessor()
snake_case_ : str = PixaStructProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
if use_large:
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : int = True
# mkdir if needed
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
print("""Model saved in {}""".format(__UpperCamelCase ) )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 58 | 1 |
"""simple docstring"""
import numpy
# List of input, output pairs
__lowerCAmelCase : List[str] = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
__lowerCAmelCase : List[Any] = (((515, 22, 13), 555), ((61, 35, 49), 150))
__lowerCAmelCase : Union[str, Any] = [2, 4, 1, 5]
__lowerCAmelCase : Dict = len(train_data)
__lowerCAmelCase : int = 0.009
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Any="train" ):
'''simple docstring'''
return calculate_hypothesis_value(__UpperCamelCase , __UpperCamelCase ) - output(
__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = 0
for i in range(len(__UpperCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] ):
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Dict ):
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : int=m ):
'''simple docstring'''
snake_case_ : Union[str, Any] = 0
for i in range(__UpperCamelCase ):
if index == -1:
summation_value += _error(__UpperCamelCase )
else:
summation_value += _error(__UpperCamelCase ) * train_data[i][0][index]
return summation_value
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = summation_of_cost_derivative(__UpperCamelCase , __UpperCamelCase ) / m
return cost_derivative_value
def __lowerCAmelCase ( ):
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
print(("""Number of iterations:""", j) )
def test_gradient_descent():
'''simple docstring'''
    for i in range(len(test_data ) ):
        print(("""Actual output value:""", output(i , """test""" )) )
        print(("""Hypothesis output:""", calculate_hypothesis_value(i , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 58 |
"""simple docstring"""
def kinetic_energy( mass : float , velocity : float ):
    '''simple docstring'''
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""" )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
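# Illustrative values for the function above:
#   kinetic_energy(10, 5)  -> 0.5 * 10 * 5 * 5 = 125.0
#   kinetic_energy(10, -5) -> 125.0 (speed enters squared, so the sign is irrelevant)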
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 58 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class ASTConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''audio-spectrogram-transformer'''
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=1_6 , qkv_bias=True , frequency_stride=1_0 , time_stride=1_0 , max_length=1_0_2_4 , num_mel_bins=1_2_8 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
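# For reference, the defaults above appear to correspond to the AST base setup:
# 768-dim hidden states, 12 layers and heads, 16x16 patches with stride 10 in
# both frequency and time, and inputs of up to 1024 frames over 128 mel bins.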
| 58 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionInpaintPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase = frozenset([] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
snake_case_ : Dict = PNDMScheduler(skip_prk_steps=_lowercase )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case_ : Dict = CLIPTextModel(_lowercase )
snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowercase ) ).to(_lowercase )
snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Any = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : str = torch.manual_seed(_lowercase )
else:
snake_case_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline(**_lowercase )
snake_case_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = self.get_dummy_inputs(_lowercase )
snake_case_ : List[str] = sd_pipe(**_lowercase ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Optional[int] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Dict = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , torch_dtype=torch.floataa , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : List[str] = PNDMScheduler.from_pretrained(_lowercase , subfolder="""scheduler""" )
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , scheduler=_lowercase , torch_dtype=torch.floataa , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : Any = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 58 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __lowerCAmelCase ( __UpperCamelCase : str = "isbn/0140328726" ):
'''simple docstring'''
snake_case_ : Optional[Any] = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
snake_case_ : Any = F'{olid} is not a valid Open Library olid'
raise ValueError(__UpperCamelCase )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def summarize_book( ol_book_data : dict ):
    '''simple docstring'''
    desired_keys = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["""Authors"""] = [
        get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
    ]
    data["""First sentence"""] = data["""First sentence"""]["""value"""]
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = """, """.join(value )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(F'''Sorry, {isbn} is not a valid ISBN. Please enter a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print('''\n'''.join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
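# For illustration only: with the default olid ("isbn/0140328726", Roald Dahl's
# "Fantastic Mr Fox"), summarize_book(get_openlibrary_data()) returns a dict keyed
# by the `desired_keys` labels above, e.g. {"Title": "Fantastic Mr Fox",
# "Authors": "Roald Dahl", ...}; exact values depend on the live Open Library record.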
| 58 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang ):
    '''simple docstring'''
    texts = {
        """en""": """Machine learning is great, isn't it?""",
        """ru""": """Машинное обучение - это здорово, не так ли?""",
        """de""": """Maschinelles Lernen ist großartig, oder?""",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
    pair = F'{src_lang}-{tgt_lang}'
    readme = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation.\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , """README.md""" )
    print(F'Generating {path}' )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_rag"""] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_rag"""] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 |
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58 | 1 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__lowerCAmelCase : Any = logging.getLogger(__name__)
__lowerCAmelCase : List[str] = '''Hello world! cécé herlolip'''
BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints( path_to_checkpoints , dump_path ):
    '''simple docstring'''
    # Instantiate the authors' model with the pre-trained weights
    # (the four boolean flag values below are assumed from the upstream script)
    config = BertAbsConfig(
        temp_dir=""".""" , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="""bert""" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
    checkpoints = torch.load(path_to_checkpoints , lambda storage , loc : storage )
    original = AbsSummarizer(checkpoints , torch.device("""cpu""" ) , config )
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device("""cpu""" ) )
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
snake_case_ : Tuple = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
snake_case_ : Union[str, Any] = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(__UpperCamelCase )) )
snake_case_ : Any = torch.tensor(__UpperCamelCase ).unsqueeze(0 )
snake_case_ : Optional[Any] = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(__UpperCamelCase )) )
snake_case_ : List[Any] = torch.tensor(__UpperCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    # NOTE: the argument identities below are assumed from the surrounding setup
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("""Maximum absolute difference between model outputs: {:.2f}""".format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("""Maximum absolute difference between generator outputs: {:.2f}""".format(maximum_absolute_difference ) )
    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1E-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
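# A hypothetical invocation (the script name and paths are placeholders, not
# taken from the original source):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_original.pt \
#       --pytorch_dump_folder_path ./bertabs-converted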
| 58 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
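For example, if the reference is "the cat sat on the mat" (N=6) and the
prediction is "the cat sit on mat", there is one substitution ("sit" for "sat"),
one deletion ("the"), and no insertions, giving WER = (1 + 1 + 0) / 6 ≈ 0.33.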
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 58 | 1 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , )
def UpperCAmelCase__ ( self , _lowercase = "auto" ) -> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.enable_attention_slicing(_lowercase )
@torch.no_grad()
def __call__( self , _lowercase , _lowercase = 5_1_2 , _lowercase = 5_1_2 , _lowercase = 5_0 , _lowercase = 7.5 , _lowercase = None , _lowercase = 1 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , _lowercase = None , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
if isinstance(_lowercase , _lowercase ):
snake_case_ : str = 1
elif isinstance(_lowercase , _lowercase ):
snake_case_ : int = len(_lowercase )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(_lowercase )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(_lowercase )}.' )
# get prompt text embeddings
snake_case_ : List[Any] = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case_ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
snake_case_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
snake_case_ : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case_ , snake_case_ , snake_case_ : List[str] = text_embeddings.shape
snake_case_ : Union[str, Any] = text_embeddings.repeat(1 , _lowercase , 1 )
snake_case_ : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _lowercase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case_ : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case_ : List[str]
if negative_prompt is None:
snake_case_ : Optional[int] = [""""""]
elif type(_lowercase ) is not type(_lowercase ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(_lowercase )} !='
f' {type(_lowercase )}.' )
elif isinstance(_lowercase , _lowercase ):
snake_case_ : List[Any] = [negative_prompt]
elif batch_size != len(_lowercase ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(_lowercase )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
snake_case_ : Any = negative_prompt
snake_case_ : Tuple = text_input_ids.shape[-1]
snake_case_ : Tuple = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=_lowercase , truncation=_lowercase , return_tensors="""pt""" , )
snake_case_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ : Any = uncond_embeddings.shape[1]
snake_case_ : List[Any] = uncond_embeddings.repeat(_lowercase , _lowercase , 1 )
snake_case_ : Any = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case_ : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case_ : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)
snake_case_ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case_ : int = torch.randn(
_lowercase , generator=_lowercase , device="""cpu""" , dtype=_lowercase ).to(self.device )
snake_case_ : Any = torch.randn(_lowercase , generator=_lowercase , device="""cpu""" , dtype=_lowercase ).to(
self.device )
else:
snake_case_ : Union[str, Any] = torch.randn(
_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
snake_case_ : List[Any] = torch.randn(_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
snake_case_ : str = latents_reference.to(self.device )
snake_case_ : List[Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
snake_case_ : int = (latents_shape[3] - latents_shape_reference[3]) // 2
snake_case_ : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2
snake_case_ : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
snake_case_ : List[str] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
snake_case_ : Union[str, Any] = 0 if dx < 0 else dx
snake_case_ : List[str] = 0 if dy < 0 else dy
snake_case_ : Union[str, Any] = max(-dx , 0 )
snake_case_ : Dict = max(-dy , 0 )
snake_case_ : Union[str, Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case_ : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case_ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case_ : Optional[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case_ : Optional[Any] = {}
if accepts_eta:
snake_case_ : Tuple = eta
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ : Union[str, Any] = self.scheduler.scale_model_input(_lowercase , _lowercase )
# predict the noise residual
snake_case_ : List[Any] = self.unet(_lowercase , _lowercase , encoder_hidden_states=_lowercase ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case_ , snake_case_ : Tuple = noise_pred.chunk(2 )
snake_case_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : Tuple = self.scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowercase , _lowercase , _lowercase )
snake_case_ : Optional[Any] = 1 / 0.1_8215 * latents
snake_case_ : Any = self.vae.decode(_lowercase ).sample
snake_case_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
snake_case_ : Any = self.feature_extractor(self.numpy_to_pil(_lowercase ) , return_tensors="""pt""" ).to(
self.device )
snake_case_ , snake_case_ : str = self.safety_checker(
images=_lowercase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
snake_case_ : str = None
if output_type == "pil":
snake_case_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_lowercase , nsfw_content_detected=_lowercase )
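# A rough usage sketch, assuming this file is registered as a diffusers community
# pipeline (the checkpoint id and file name below are illustrative, not from the source):
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="./seed_resize_stable_diffusion.py"
#   )
#   image = pipe(prompt="a photo of an astronaut", height=512, width=512).images[0]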
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , num_channels=3 , image_size=2_2_4 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
        '''simple docstring'''
        size = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        '''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ) -> None:
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , """image_mean""" ) )
        self.assertTrue(hasattr(image_processor , """image_std""" ) )
        self.assertTrue(hasattr(image_processor , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processor , """do_resize""" ) )
        self.assertTrue(hasattr(image_processor , """size""" ) )
    def test_batch_feature( self ) -> None:
        '''simple docstring'''
        pass
    def test_call_pil( self ) -> None:
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
    def test_call_numpy( self ) -> None:
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
    def test_call_pytorch( self ) -> None:
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 58 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''gptj'''
_lowerCamelCase = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowercase=5_0_4_0_0 , _lowercase=2_0_4_8 , _lowercase=4_0_9_6 , _lowercase=2_8 , _lowercase=1_6 , _lowercase=6_4 , _lowercase=None , _lowercase="gelu_new" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=5_0_2_5_6 , _lowercase=5_0_2_5_6 , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ : Dict = vocab_size
snake_case_ : str = n_positions
snake_case_ : List[str] = n_embd
snake_case_ : List[str] = n_layer
snake_case_ : str = n_head
snake_case_ : List[Any] = n_inner
snake_case_ : Union[str, Any] = rotary_dim
snake_case_ : str = activation_function
snake_case_ : Tuple = resid_pdrop
snake_case_ : str = embd_pdrop
snake_case_ : Optional[int] = attn_pdrop
snake_case_ : Tuple = layer_norm_epsilon
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Dict = use_cache
snake_case_ : Any = bos_token_id
snake_case_ : Dict = eos_token_id
super().__init__(
bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = "default" , _lowercase = None , _lowercase = False , ) -> Optional[int]:
'''simple docstring'''
super().__init__(_lowercase , task=_lowercase , patching_specs=_lowercase , use_past=_lowercase )
if not getattr(self._config , """pad_token_id""" , _lowercase ):
# TODO: how to do that better?
snake_case_ : Dict = 0
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
snake_case_ : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction="""inputs""" )
snake_case_ : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
snake_case_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return self._config.n_layer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return self._config.n_head
def UpperCAmelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = super(_lowercase , self ).generate_dummy_inputs(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
# We need to order the input in the way they appears in the forward()
snake_case_ : Optional[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case_ , snake_case_ : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case_ : List[Any] = seqlen + 2
snake_case_ : Dict = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case_ : str = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(self.num_layers )
]
snake_case_ : Dict = common_inputs["""attention_mask"""]
if self.use_past:
snake_case_ : List[Any] = ordered_inputs["""attention_mask"""].dtype
snake_case_ : Union[str, Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return 1_3
| 58 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar('''KT''')
VT = TypeVar('''VT''')
class Node ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
    def level( self ) -> int:
'''simple docstring'''
return len(self.forward )
class SkipList ( Generic[KT, VT] ):
"""simple docstring"""
    def __init__( self , p = 0.5 , max_level = 1_6 ) -> None:
        '''simple docstring'''
        self.head : Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__( self ) -> str:
        '''simple docstring'''
        items = list(self )
        if len(items ) == 0:
            return f'SkipList(level={self.level})'
        label_size = max((len(str(item ) ) for item in items) , default=4 )
        label_size = max(label_size , 4 ) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f'[{node.key}]'.ljust(label_size , """-""" ) + """* """ * len(forwards ) )
        lines.append(""" """ * label_size + """| """ * len(forwards ) )
        while len(node.forward ) != 0:
            node = node.forward[0]
            lines.append(
                f'[{node.key}]'.ljust(label_size , """-""" )
                + """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
            lines.append(""" """ * label_size + """| """ * len(forwards ) )
            forwards = node.forward
        lines.append("""None""".ljust(label_size ) + """* """ * len(forwards ) )
        return f'SkipList(level={self.level})\n' + "\n".join(lines )
    def __iter__( self ):
        '''simple docstring'''
        node = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level( self ) -> int:
        '''simple docstring'''
        level = 1
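        # With promotion probability p, the resulting level is geometrically
        # distributed with expected value 1 / (1 - p) (2 for the default p = 0.5),
        # keeping the expected number of forward pointers per node constant.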
while random() < self.p and level < self.max_level:
level += 1
return level
    def _locate_node( self , key ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        '''simple docstring'''
        update_vector = []
        node = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
    def delete( self , key ) -> None:
        '''simple docstring'''
        node , update_vector = self._locate_node(key )
        if node is not None:
            for i, update_node in enumerate(update_vector ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert( self , key , value ) -> None:
        '''simple docstring'''
        node , update_vector = self._locate_node(key )
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level ):
                    update_vector.append(self.head )
                self.level = level
            new_node = Node(key , value )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(new_node )
                else:
                    update_node.forward[i] = new_node
    def find( self , key ) -> VT | None:
        '''simple docstring'''
        node , _ = self._locate_node(key )
if node is not None:
return node.value
return None
def test_insert():
'''simple docstring'''
    skip_list = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def test_insert_overrides_existing_value():
'''simple docstring'''
    skip_list = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def test_searching_empty_list_returns_none():
'''simple docstring'''
    skip_list = SkipList()
assert skip_list.find("""Some key""" ) is None
def test_search():
'''simple docstring'''
    skip_list = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(__UpperCamelCase , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(__UpperCamelCase , __UpperCamelCase )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 58 | 1 |
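# Editor's note: the skip-list snippet above is stored with obfuscated identifiers,
# so the assignment targets inside delete() are lost. A self-contained sketch of the
# re-linking that delete() performs (the minimal Node below is my reconstruction,
# not the class from the dataset):
class _Node:
    def __init__(self, key=None, value=None):
        self.key, self.value, self.forward = key, value, []

    @property
    def level(self):
        return len(self.forward)

def _relink_after_delete(node, key, update_vector):
    # Bypass `node` on every lane where a predecessor still points at it.
    for i, update_node in enumerate(update_vector):
        if update_node.level > i and update_node.forward[i].key == key:
            if node.level > i:
                update_node.forward[i] = node.forward[i]
            else:
                update_node.forward = update_node.forward[:i]  # drop now-empty lanes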
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def __lowerCAmelCase ( __UpperCamelCase : np.ndarray , __UpperCamelCase : float ):
'''simple docstring'''
snake_case_ : Dict = math.sqrt(__UpperCamelCase )
snake_case_ : str = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __lowerCAmelCase ( __UpperCamelCase : np.ndarray , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float ):
'''simple docstring'''
snake_case_ : Any = np.zeros((kernel_size, kernel_size) )
for i in range(0 , __UpperCamelCase ):
for j in range(0 , __UpperCamelCase ):
snake_case_ : int = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : int , ):
'''simple docstring'''
snake_case_ : Any = np.zeros(img.shape )
snake_case_ : List[Any] = get_gauss_kernel(__UpperCamelCase , __UpperCamelCase )
snake_case_ , snake_case_ : Dict = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
snake_case_ : Tuple = get_slice(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Optional[Any] = img_s - img_s[kernel_size // 2, kernel_size // 2]
snake_case_ : int = vec_gaussian(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = np.multiply(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Tuple = np.multiply(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = np.sum(__UpperCamelCase ) / np.sum(__UpperCamelCase )
snake_case_ : Any = val
return imga
def __lowerCAmelCase ( __UpperCamelCase : list ):
'''simple docstring'''
snake_case_ : Any = args[1] if args[1:] else """../image_data/lena.jpg"""
snake_case_ : Union[str, Any] = float(args[2] ) if args[2:] else 1.0
snake_case_ : List[Any] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
snake_case_ : Optional[Any] = int(args[4] )
snake_case_ : List[str] = kernel_size + abs(kernel_size % 2 - 1 )
else:
snake_case_ : Optional[Any] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = parse_args(sys.argv)
__lowerCAmelCase : Union[str, Any] = cva.imread(filename, 0)
cva.imshow('''input image''', img)
__lowerCAmelCase : List[Any] = img / 255
__lowerCAmelCase : List[Any] = out.astype('''float32''')
__lowerCAmelCase : Optional[Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
__lowerCAmelCase : str = out * 255
__lowerCAmelCase : Union[str, Any] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 58 |
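# Editor's note: `cva` and `np.uinta` above appear to be obfuscations of `cv2` and
# `np.uint8` (an assumption; this dataset mangles digits inside identifiers). A
# dependency-light sketch of the same per-pixel bilateral weighting, for reference:
import numpy as np

def bilateral_pixel(window: np.ndarray, sigma_space: float, sigma_intensity: float) -> float:
    """Filter the centre pixel of an odd-sized square window."""
    half = window.shape[0] // 2
    ys, xs = np.mgrid[-half : half + 1, -half : half + 1]
    spatial = np.exp(-(ys**2 + xs**2) / (2 * sigma_space**2))  # distance weight
    ranged = np.exp(-((window - window[half, half]) ** 2) / (2 * sigma_intensity**2))
    weights = spatial * ranged
    return float(np.sum(weights * window) / np.sum(weights))

print(bilateral_pixel(np.random.rand(5, 5), sigma_space=1.0, sigma_intensity=0.5))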
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[Any] = '''examples/'''
__lowerCAmelCase : Union[str, Any] = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__lowerCAmelCase : Union[str, Any] = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__lowerCAmelCase : List[Any] = '''README.md'''
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : Any = f.read()
snake_case_ , snake_case_ : Optional[int] = REPLACE_PATTERNS[pattern]
snake_case_ : Union[str, Any] = replace.replace("""VERSION""" , __UpperCamelCase )
snake_case_ : List[Any] = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="""examples""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : int=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = """🤗 Transformers currently provides the following architectures"""
snake_case_ : Union[str, Any] = """1. Want to contribute a new model?"""
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : str = f.readlines()
# Find the start of the list.
snake_case_ : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case_ : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
snake_case_ : Any = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
snake_case_ : Any = f.read()
snake_case_ : Tuple = REPLACE_PATTERNS["""init"""][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str=False ):
'''simple docstring'''
snake_case_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
snake_case_ : str = default_version.base_version
elif patch:
snake_case_ : str = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
snake_case_ : str = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
snake_case_ : int = input(F'Which version are you releasing? [{default_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Optional[int] = default_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = get_version()
snake_case_ : str = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
snake_case_ : Tuple = current_version.base_version
# Check with the user we got that right.
snake_case_ : Optional[int] = input(F'Which version are we developing now? [{dev_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Dict = dev_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__lowerCAmelCase : str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 58 | 1 |
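# Editor's note: the release script above boils down to one move: substitute a
# version string via a pre-compiled regex with "VERSION" as placeholder. A minimal
# standalone sketch (the input string is illustrative):
import re

VERSION_RE = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)

def bump_version(source: str, new_version: str) -> str:
    return VERSION_RE.sub('__version__ = "VERSION"'.replace("VERSION", new_version), source)

print(bump_version('__version__ = "0.19.0.dev0"', "0.19.0"))  # __version__ = "0.19.0"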
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Tuple = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = DebertaVaTokenizer
_lowerCamelCase = DebertaVaTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : List[Any] = DebertaVaTokenizer(_lowercase , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = """this is a test"""
snake_case_ : Optional[int] = """this is a test"""
return input_text, output_text
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = """<pad>"""
snake_case_ : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(_lowercase ) , 3_0_0_0_1 )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : str = """ \tHeLLo!how \n Are yoU? """
snake_case_ : Tuple = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
snake_case_ : Tuple = DebertaVaTokenizer(_lowercase , do_lower_case=_lowercase )
snake_case_ : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = DebertaVaTokenizerFast(_lowercase , do_lower_case=_lowercase )
snake_case_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = """I was born in 92000, and this is falsé."""
snake_case_ : Optional[int] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
snake_case_ : List[str] = DebertaVaTokenizer(_lowercase , split_by_punct=_lowercase )
snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Dict = DebertaVaTokenizerFast(_lowercase , split_by_punct=_lowercase )
snake_case_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = """I was born in 92000, and this is falsé."""
snake_case_ : List[Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
snake_case_ : Union[str, Any] = DebertaVaTokenizer(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
snake_case_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : int = DebertaVaTokenizerFast(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
snake_case_ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = """I was born in 92000, and this is falsé."""
snake_case_ : Optional[Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
snake_case_ : Dict = DebertaVaTokenizer(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
snake_case_ : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Any = DebertaVaTokenizerFast(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
snake_case_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = """I was born in 92000, and this is falsé."""
snake_case_ : List[str] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
snake_case_ : str = DebertaVaTokenizer(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
snake_case_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Dict = DebertaVaTokenizerFast(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
snake_case_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = """ \tHeLLo!how \n Are yoU? """
snake_case_ : Dict = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
snake_case_ : Union[str, Any] = DebertaVaTokenizer(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Tuple = DebertaVaTokenizerFast(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
snake_case_ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : List[Any] = self.get_rust_tokenizer()
snake_case_ : Dict = """I was born in 92000, and this is falsé."""
snake_case_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
snake_case_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
snake_case_ : Union[str, Any] = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Dict = self.get_rust_tokenizer()
snake_case_ : Any = tokenizer.encode(_lowercase )
snake_case_ : int = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = """This is a test"""
snake_case_ : Union[str, Any] = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
snake_case_ : str = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
snake_case_ : List[str] = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
snake_case_ : str = DebertaVaTokenizer(_lowercase , keep_accents=_lowercase )
snake_case_ : Tuple = DebertaVaTokenizerFast(_lowercase , keep_accents=_lowercase )
snake_case_ : List[str] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : int = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Any = rust_tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# fmt: off
snake_case_ : Union[str, Any] = """I was born in 92000, and this is falsé."""
snake_case_ : List[str] = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
snake_case_ : Optional[int] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
snake_case_ : str = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
snake_case_ : Tuple = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Tuple = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : str = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Optional[int] = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Dict = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : int = rust_tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = DebertaVaTokenizer(_lowercase )
snake_case_ : str = tokenizer.encode("""sequence builders""" )
snake_case_ : Tuple = tokenizer.encode("""multi-sequence build""" )
snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowercase )
snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _lowercase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _lowercase , )
@slow
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = {"""input_ids""": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 58 |
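# Editor's note: every test above follows one parity pattern -- tokenize with the
# slow (SentencePiece) tokenizer and the fast (Rust-backed) one, then compare the
# outputs. A hedged sketch of that pattern (the model name is illustrative, and the
# call is left commented out because it downloads files):
from transformers import AutoTokenizer

def assert_slow_fast_parity(model_name: str, text: str) -> None:
    slow = AutoTokenizer.from_pretrained(model_name, use_fast=False)
    fast = AutoTokenizer.from_pretrained(model_name, use_fast=True)
    assert slow.encode(text, add_special_tokens=False) == fast.encode(text, add_special_tokens=False)

# assert_slow_fast_parity("microsoft/deberta-v2-xlarge", "I was born in 92000, and this is falsé.")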
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 | 1 |
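# Editor's note: the function above is the Newton-Laplace relation c = sqrt(K / rho).
# Quick worked check with approximate textbook values for water:
bulk_modulus_water = 2.15e9  # Pa
density_water = 998.0        # kg/m^3
print(f"{(bulk_modulus_water / density_water) ** 0.5:.0f} m/s")  # ~1468 m/s, near the accepted ~1480 m/s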
"""simple docstring"""
import os
def __lowerCAmelCase ( ):
'''simple docstring'''
with open(os.path.dirname(__UpperCamelCase ) + """/grid.txt""" ) as f:
snake_case_ : List[Any] = [] # noqa: E741
for _ in range(2_0 ):
l.append([int(__UpperCamelCase ) for x in f.readline().split()] )
snake_case_ : Optional[Any] = 0
# right
for i in range(2_0 ):
for j in range(1_7 ):
snake_case_ : str = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case_ : int = temp
# down
for i in range(1_7 ):
for j in range(2_0 ):
snake_case_ : str = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case_ : str = temp
# diagonal 1
for i in range(1_7 ):
for j in range(1_7 ):
snake_case_ : Optional[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case_ : Dict = temp
# diagonal 2
for i in range(1_7 ):
for j in range(3 , 2_0 ):
snake_case_ : int = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case_ : Optional[Any] = temp
return maximum
if __name__ == "__main__":
print(solution())
| 58 |
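# Editor's note: the four scans above (right, down, two diagonals) collapse into one
# loop over direction vectors. An equivalent sketch on a random 20x20 grid (the real
# Euler grid lives in grid.txt, which is not reproduced here):
import random

def max_product_of_four(grid):
    n = len(grid)
    best = 0
    for dy, dx in ((0, 1), (1, 0), (1, 1), (1, -1)):  # right, down, diag, anti-diag
        for y in range(n):
            for x in range(n):
                if 0 <= y + 3 * dy < n and 0 <= x + 3 * dx < n:
                    p = 1
                    for step in range(4):
                        p *= grid[y + step * dy][x + step * dx]
                    best = max(best, p)
    return best

demo = [[random.randint(0, 99) for _ in range(20)] for _ in range(20)]
print(max_product_of_four(demo))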
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
snake_case_ : str = precision
snake_case_ : Any = ceil(precision / 1_4 )
snake_case_ : Dict = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
snake_case_ : Optional[Any] = 1
snake_case_ : List[str] = 1_3_5_9_1_4_0_9
snake_case_ : Optional[int] = Decimal(__UpperCamelCase )
for k in range(1 , __UpperCamelCase ):
snake_case_ : Any = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCamelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__lowerCAmelCase : int = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 58 | 1 |
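# Editor's note: the snippet above is the Chudnovsky algorithm with its assignment
# names obfuscated. A de-mangled sketch of the same series,
#   pi = 426880 * sqrt(10005) / sum_k (M_k * L_k / X_k),
# with M_k = (6k)! / ((3k)! (k!)^3), L_k = 13591409 + 545140134*k, X_k = (-640320)^(3k):
from decimal import Decimal, getcontext
from math import factorial

def chudnovsky_pi(digits: int) -> str:
    getcontext().prec = digits + 5            # guard digits
    constant = 426880 * Decimal(10005).sqrt()
    linear, exponential = 13591409, Decimal(1)
    partial_sum = Decimal(linear)
    for k in range(1, digits // 14 + 1):      # each term adds ~14 digits
        multinomial = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear += 545140134
        exponential *= -262537412640768000    # (-640320)^3
        partial_sum += Decimal(multinomial * linear) / exponential
    return str(constant / partial_sum)[: digits + 1]

print(chudnovsky_pi(50))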
"""simple docstring"""
from __future__ import annotations
from math import pi
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 |
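# Editor's note: the function above just rearranges X_L = 2 * pi * f * L for whichever
# argument is zero. Worked check with illustrative values (10 mH at 1 kHz):
from math import pi

print(f"{2 * pi * 1000.0 * 10e-3:.2f} ohm")  # ~62.83 ohm of inductive reactance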
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Any = torch.exp(__UpperCamelCase )
snake_case_ : Optional[int] = torch.sum(__UpperCamelCase , dim=1 ) # sum of exp(x_i)
snake_case_ : str = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(__UpperCamelCase ) - B / A
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> int:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = config.output_attentions
snake_case_ : str = config.output_hidden_states
snake_case_ : List[str] = nn.ModuleList([BertLayer(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Tuple = nn.ModuleList([BertHighway(_lowercase ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
if (type(_lowercase ) is float) or (type(_lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ : Dict = x
else:
snake_case_ : Union[str, Any] = x
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Any:
'''simple docstring'''
snake_case_ : str = ()
snake_case_ : str = ()
snake_case_ : List[str] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ : int = all_hidden_states + (hidden_states,)
snake_case_ : Any = layer_module(
_lowercase , _lowercase , head_mask[i] , _lowercase , _lowercase )
snake_case_ : Dict = layer_outputs[0]
if self.output_attentions:
snake_case_ : str = all_attentions + (layer_outputs[1],)
snake_case_ : Optional[int] = (hidden_states,)
if self.output_hidden_states:
snake_case_ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : int = current_outputs + (all_attentions,)
snake_case_ : Optional[Any] = self.highway[i](_lowercase )
# logits, pooled_output
if not self.training:
snake_case_ : Tuple = highway_exit[0]
snake_case_ : List[str] = entropy(_lowercase )
snake_case_ : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_lowercase , i + 1 )
else:
snake_case_ : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ : Dict = all_hidden_states + (hidden_states,)
snake_case_ : str = (hidden_states,)
if self.output_hidden_states:
snake_case_ : List[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Union[str, Any] = outputs + (all_attentions,)
snake_case_ : List[str] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config
snake_case_ : int = BertEmbeddings(_lowercase )
snake_case_ : Tuple = DeeBertEncoder(_lowercase )
snake_case_ : int = BertPooler(_lowercase )
self.init_weights()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = value
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_lowercase )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
snake_case_ : Dict = input_ids.size()
elif inputs_embeds is not None:
snake_case_ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
snake_case_ : int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ : Dict = torch.ones(_lowercase , device=_lowercase )
if encoder_attention_mask is None:
snake_case_ : Tuple = torch.ones(_lowercase , device=_lowercase )
if token_type_ids is None:
snake_case_ : Any = torch.zeros(_lowercase , dtype=torch.long , device=_lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ : torch.Tensor = self.get_extended_attention_mask(_lowercase , _lowercase , _lowercase )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ : Any = encoder_attention_mask[:, None, None, :]
snake_case_ : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ : List[str] = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ : int = self.get_head_mask(_lowercase , self.config.num_hidden_layers )
snake_case_ : List[str] = self.embeddings(
input_ids=_lowercase , position_ids=_lowercase , token_type_ids=_lowercase , inputs_embeds=_lowercase )
snake_case_ : List[str] = self.encoder(
_lowercase , attention_mask=_lowercase , head_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
snake_case_ : Optional[Any] = encoder_outputs[0]
snake_case_ : Union[str, Any] = self.pooler(_lowercase )
snake_case_ : Optional[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = message
snake_case_ : str = exit_layer # start from 1!
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : str = BertPooler(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Dict = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = encoder_outputs[0]
snake_case_ : List[Any] = self.pooler(_lowercase )
# "return" pooler_output
# BertModel
snake_case_ : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ : Union[str, Any] = bmodel_output[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : List[str] = self.classifier(_lowercase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : Tuple = config.num_hidden_layers
snake_case_ : Any = DeeBertModel(_lowercase )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> int:
'''simple docstring'''
snake_case_ : int = self.num_layers
try:
snake_case_ : Any = self.bert(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ : str = outputs[1]
snake_case_ : Optional[int] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Optional[int] = e.message
snake_case_ : Dict = e.exit_layer
snake_case_ : Optional[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(_lowercase )
snake_case_ : int = []
snake_case_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : List[Any] = MSELoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : str = (loss,) + outputs
if not self.training:
snake_case_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 58 | 1 |
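# Editor's note: DeeBERT's exit rule above compares entropy(logits) with a per-layer
# threshold. The `entropy` helper computes the entropy of softmax(x) in closed form,
# H = log(sum exp x_i) - sum(x_i * exp x_i) / sum(exp x_i). A de-mangled sketch,
# checked against the textbook -sum(p * log p):
import torch

def softmax_entropy(x: torch.Tensor) -> torch.Tensor:
    exp_x = torch.exp(x)
    a = exp_x.sum(dim=1)        # sum of exp(x_i)
    b = (x * exp_x).sum(dim=1)  # sum of x_i * exp(x_i)
    return torch.log(a) - b / a

logits = torch.randn(2, 5)
p = torch.softmax(logits, dim=1)
assert torch.allclose(softmax_entropy(logits), -(p * p.log()).sum(dim=1), atol=1e-5)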
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase : str = 16
__lowerCAmelCase : Tuple = 32
def __lowerCAmelCase ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 1_6 ):
'''simple docstring'''
snake_case_ : int = AutoTokenizer.from_pretrained("""bert-base-cased""" )
snake_case_ : str = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCamelCase : Dict ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : str = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels', which is the name the models of
    # the transformers library expect
snake_case_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCamelCase : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we pad to a multiple of 16 (fp8) or 8 (fp16/bf16)
if accelerator.mixed_precision == "fp8":
snake_case_ : int = 1_6
elif accelerator.mixed_precision != "no":
snake_case_ : List[str] = 8
else:
snake_case_ : Tuple = None
return tokenizer.pad(
__UpperCamelCase , padding="""longest""" , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
snake_case_ : Tuple = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
snake_case_ : List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCAmelCase : List[Any] = mocked_dataloaders # noqa: F811
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCamelCase ) == "1":
snake_case_ : Dict = 2
# Initialize accelerator
snake_case_ : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : List[str] = config["""lr"""]
snake_case_ : str = int(config["""num_epochs"""] )
snake_case_ : Tuple = int(config["""seed"""] )
snake_case_ : List[Any] = int(config["""batch_size"""] )
snake_case_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
snake_case_ : str = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case_ : str = batch_size // MAX_GPU_BATCH_SIZE
snake_case_ : Dict = MAX_GPU_BATCH_SIZE
set_seed(__UpperCamelCase )
snake_case_ , snake_case_ : Union[str, Any] = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Dict = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : Dict = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
snake_case_ : List[str] = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_0_0 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ : str = model(**__UpperCamelCase )
snake_case_ : List[Any] = outputs.loss
snake_case_ : Union[str, Any] = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
snake_case_ : List[str] = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : int = model(**__UpperCamelCase )
snake_case_ : Tuple = outputs.logits.argmax(dim=-1 )
snake_case_ , snake_case_ : List[Any] = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__UpperCamelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
snake_case_ : List[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case_ : Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
snake_case_ : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , __UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
snake_case_ : Any = parser.parse_args()
snake_case_ : Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 58 |
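# Editor's note: the "New Code" branch above trims the final gathered batch so the
# duplicate samples a distributed sampler pads in are not counted twice (the modern
# shortcut is `accelerator.gather_for_metrics`, as the comment in the script notes).
# The bookkeeping, stripped down to pure Python:
def truncate_gathered(batches, dataset_len):
    seen = 0
    for i, batch in enumerate(batches):
        if i == len(batches) - 1:
            batch = batch[: dataset_len - seen]  # drop padded duplicates
        seen += len(batch)
        yield batch

gathered = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 8, 9]]  # last batch padded with repeats
assert list(truncate_gathered(gathered, dataset_len=10)) == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]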
"""simple docstring"""
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ):
'''simple docstring'''
return (-y * np.log(__UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[int] = np.dot(__UpperCamelCase , __UpperCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(__UpperCamelCase ) ) )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int=7_0_0_0_0 ):
'''simple docstring'''
snake_case_ : Dict = np.zeros(x.shape[1] )
for iterations in range(__UpperCamelCase ):
snake_case_ : Any = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Optional[Any] = np.dot(x.T , h - y ) / y.size
snake_case_ : str = theta - alpha * gradient # updating the weights
snake_case_ : int = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Dict = cost_function(__UpperCamelCase , __UpperCamelCase )
if iterations % 1_0_0 == 0:
print(F'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
__lowerCAmelCase : Any = datasets.load_iris()
__lowerCAmelCase : List[Any] = iris.data[:, :2]
__lowerCAmelCase : Tuple = (iris.target != 0) * 1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : List[Any] = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
return sigmoid_function(
np.dot(__UpperCamelCase , __UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Tuple = (x[:, 1].min(), x[:, 1].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Optional[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__lowerCAmelCase : Any = np.c_[xxa.ravel(), xxa.ravel()]
__lowerCAmelCase : Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 58 | 1 |
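# Editor's note: the obfuscation above hides the weight-update target; the loop is
# plain batch gradient descent on the logistic log-loss. Compact de-mangled sketch
# on synthetic data (names and data are illustrative):
import numpy as np

def fit_logistic(x: np.ndarray, y: np.ndarray, alpha: float = 0.1, iters: int = 10000) -> np.ndarray:
    theta = np.zeros(x.shape[1])
    for _ in range(iters):
        h = 1.0 / (1.0 + np.exp(-(x @ theta)))   # sigmoid of the linear score
        theta -= alpha * x.T @ (h - y) / y.size  # gradient of the log-loss
    return theta

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 2))
y = (x @ np.array([2.0, -1.0]) > 0).astype(float)
print(fit_logistic(x, y))  # direction roughly proportional to [2, -1]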
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''dinat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=4 , _lowercase=3 , _lowercase=6_4 , _lowercase=[3, 4, 6, 5] , _lowercase=[2, 4, 8, 1_6] , _lowercase=7 , _lowercase=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , _lowercase=3.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Dict = patch_size
snake_case_ : Union[str, Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : List[Any] = depths
snake_case_ : Any = len(_lowercase )
snake_case_ : str = num_heads
snake_case_ : Dict = kernel_size
snake_case_ : Optional[int] = dilations
snake_case_ : List[Any] = mlp_ratio
snake_case_ : List[str] = qkv_bias
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : int = drop_path_rate
snake_case_ : int = hidden_act
snake_case_ : str = layer_norm_eps
snake_case_ : Union[str, Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : str = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Optional[Any] = layer_scale_init_value
snake_case_ : Optional[int] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
| 58 |
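# Editor's note: the only derived field in the config above is the channel width
# after the last stage -- each stage doubles `embed_dim`, hence
# hidden_size = embed_dim * 2 ** (num_stages - 1). With the defaults (64, 4 stages):
print(int(64 * 2 ** (4 - 1)))  # 512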
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCAmelCase : Tuple = '''scheduler_config.json'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's step function."""

    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """Mixin containing common functions for the Flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs)
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Create a beta schedule that discretizes the given cosine alpha_bar function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
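# Added sanity-check sketch (illustrative T=10, assumes jax is installed): the
# cosine schedule yields strictly positive betas capped at max_beta.
if __name__ == "__main__":
    _betas = betas_for_alpha_bar(10)
    assert _betas.shape == (10,)
    assert bool((_betas > 0).all()) and bool((_betas <= 0.999).all())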
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
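# Added sketch with toy values (not a real scheduler config): hand-build a
# CommonSchedulerState and exercise add_noise_common, which implements the
# closed form q(x_t | x_0) = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
if __name__ == "__main__":
    _betas = jnp.linspace(1e-4, 2e-2, 10)
    _alphas = 1.0 - _betas
    _state = CommonSchedulerState(alphas=_alphas, betas=_betas, alphas_cumprod=jnp.cumprod(_alphas, axis=0))
    _x0 = jnp.ones((2, 3))  # fake "clean" samples
    _eps = jnp.zeros((2, 3))  # zero noise, so the result is sqrt(alpha_bar_t) * x0
    _t = jnp.array([0, 9])  # one timestep per batch element
    _noisy = add_noise_common(_state, _x0, _eps, _t)
    assert _noisy.shape == _x0.shape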
| 58 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Apply the logistic sigmoid 1 / (1 + e^(-x)) elementwise.

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
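    # Added numerical check: the derivative identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)),
    # verified with central finite differences.
    xs = np.linspace(-3, 3, 7)
    step = 1e-6
    numeric = (sigmoid(xs + step) - sigmoid(xs - step)) / (2 * step)
    analytic = sigmoid(xs) * (1 - sigmoid(xs))
    assert np.allclose(numeric, analytic, atol=1e-6)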
| 58 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds)
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
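# Added usage sketch through the public `transformers.pipeline` API (assumption:
# an OWL-ViT checkpoint and a sample image URL; both names are illustrative).
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    # each item: {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
    print(predictions[:2])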
| 58 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that current and next vertices are adjacent
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited these vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
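# Added usage example (graph chosen for illustration): a 5-vertex graph that
# contains a Hamiltonian cycle starting at vertex 0.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]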
| 58 | 1 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
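# Added usage sketch via the public API this class backs (datasets.Dataset.from_generator).
if __name__ == "__main__":
    from datasets import Dataset

    def gen():
        for i in range(3):
            yield {"id": i, "text": f"example {i}"}

    ds = Dataset.from_generator(gen)
    print(ds[0])  # {'id': 0, 'text': 'example 0'}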
| 58 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 58 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 58 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 58 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
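# Added sketch of the special-token layout the two methods above produce, shown
# with toy ids (101/102 are the conventional BERT [CLS]/[SEP] ids; an assumption
# here, not read from a real vocabulary).
if __name__ == "__main__":
    cls_id, sep_id = 101, 102
    a_ids, b_ids = [7, 8], [9]
    input_ids = [cls_id] + a_ids + [sep_id] + b_ids + [sep_id]
    token_type_ids = [0] * (len(a_ids) + 2) + [1] * (len(b_ids) + 1)
    print(input_ids, token_type_ids)  # [101, 7, 8, 102, 9, 102] [0, 0, 0, 0, 1, 1]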
| 58 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__lowerCAmelCase : Dict = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
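# Added example invocation using the flags defined above (script name, checkpoint
# and paths are illustrative):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted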
| 58 | 1 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
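    # Added follow-up sketch: training accuracy from thresholding the fitted
    # probabilities at 0.5 (uses x, y and predict_prob defined above).
    predicted = (predict_prob(x) >= 0.5).astype(int)
    print("training accuracy:", (predicted == y).mean())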
| 58 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 58 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.")
| 58 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase = 1_2_8 , _lowercase = 2_5_6 , _lowercase = 2000.0 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 6_4 , _lowercase = 2_0_4_8 , _lowercase = 0.1 , ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = nn.Sequential(
nn.Linear(_lowercase , d_model * 4 , bias=_lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowercase ) , nn.SiLU() , )
snake_case_ : Any = nn.Embedding(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = False
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Union[str, Any] = nn.Dropout(p=_lowercase )
snake_case_ : Tuple = nn.ModuleList()
for lyr_num in range(_lowercase ):
# FiLM conditional T5 decoder
snake_case_ : Union[str, Any] = DecoderLayer(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
self.decoders.append(_lowercase )
snake_case_ : List[Any] = TaLayerNorm(_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(p=_lowercase )
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
snake_case_ : Optional[int] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
snake_case_ : int = self.conditioning_emb(_lowercase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
snake_case_ : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
snake_case_ : Dict = torch.broadcast_to(
torch.arange(_lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
snake_case_ : Tuple = self.position_encoding(_lowercase )
snake_case_ : Optional[Any] = self.continuous_inputs_projection(_lowercase )
inputs += position_encodings
snake_case_ : List[Any] = self.dropout(_lowercase )
# decoder: No padding present.
snake_case_ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
snake_case_ : int = [(x, self.encoder_decoder_mask(_lowercase , _lowercase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
snake_case_ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
snake_case_ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
snake_case_ : int = lyr(
_lowercase , conditioning_emb=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )[0]
snake_case_ : int = self.decoder_norm(_lowercase )
snake_case_ : Union[str, Any] = self.post_dropout(_lowercase )
snake_case_ : int = self.spec_out(_lowercase )
return spec_out
class DecoderLayer(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=1E-6 ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case_ : Any = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase ) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.layer[0](
_lowercase , conditioning_emb=_lowercase , attention_mask=_lowercase , )
if encoder_hidden_states is not None:
snake_case_ : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
snake_case_ : str = self.layer[1](
_lowercase , key_value_states=_lowercase , attention_mask=_lowercase , )
# Apply Film Conditional Feed Forward layer
snake_case_ : Any = self.layer[-1](_lowercase , _lowercase )
return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Any = TaLayerNorm(_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Union[str, Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : List[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : str = self.FiLMLayer(_lowercase , _lowercase )
# Self-attention block
snake_case_ : List[Any] = self.attention(_lowercase )
snake_case_ : List[str] = hidden_states + self.dropout(_lowercase )
return hidden_states
class TaLayerCrossAttention(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : Union[str, Any] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
snake_case_ : Optional[Any] = self.attention(
_lowercase , encoder_hidden_states=_lowercase , attention_mask=attention_mask.squeeze(1 ) , )
snake_case_ : Any = hidden_states + self.dropout(_lowercase )
return layer_output
class TaLayerFFCond(nn.Module):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = TaDenseGatedActDense(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Optional[int] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Tuple = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : Optional[int] = self.film(_lowercase , _lowercase )
snake_case_ : int = self.DenseReluDense(_lowercase )
snake_case_ : Optional[Any] = hidden_states + self.dropout(_lowercase )
return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        # gated-GELU feed forward: two input projections (gate and linear), one output projection
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """T5-style layer norm: scale only, no bias and no mean subtraction."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # variance is computed in float32 for numerical stability
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """
    Implementation of the tanh approximation of the GELU activation function.
    See the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM layer: feature-wise linear modulation, x -> x * (1 + scale) + shift."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
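# Added sketch (illustrative shapes): a conditioning vector produces per-channel
# scale and shift that modulate the features.
if __name__ == "__main__":
    film = TaFiLMLayer(in_features=8, out_features=4)
    x = torch.randn(2, 5, 4)  # (batch, seq, channels)
    cond = torch.randn(2, 1, 8)  # conditioning embedding, broadcast over seq
    out = film(x, cond)
    assert out.shape == x.shape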
| 58 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
snake_case_ : Union[str, Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
snake_case_ : int = evaluate(dataset=_lowercase , predictions=_lowercase )
return score
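# Note on the conversion above: the CUAD scoring script (`evaluate`, imported at the top)
# expects a SQuAD-style nested {"paragraphs": [{"qas": [...]}]} layout, which `_compute`
# rebuilds from the flat reference dicts, while predictions are passed as an {id: texts} map.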
| 58 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''roformer'''
def __init__( self , _lowercase=5_0_0_0_0 , _lowercase=None , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_5_3_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=0 , _lowercase=False , _lowercase=True , **_lowercase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : str = vocab_size
snake_case_ : Any = hidden_size if embedding_size is None else embedding_size
snake_case_ : List[str] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : str = layer_norm_eps
snake_case_ : List[str] = rotary_value
snake_case_ : str = use_cache
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
snake_case_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
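# The config's `rotary_value` flag points at RoFormer's defining trick, rotary position
# embeddings. A minimal sketch of the idea (all names and the interleaved-pair layout are
# assumptions, not the model's actual implementation):
import torch

def rotary_sketch(x, positions, base=10000.0):
    # x: (seq_len, dim) float tensor with even dim; positions: (seq_len,) integer positions
    seq_len, dim = x.shape
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    angles = positions[:, None].float() * inv_freq[None, :]  # (seq_len, dim/2)
    sin, cos = angles.sin(), angles.cos()
    x1, x2 = x[..., 0::2], x[..., 1::2]
    out = torch.empty_like(x)
    # rotate each 2-d pair by a position-dependent angle; absolute positions then
    # enter query/key dot products only as relative phase differences
    out[..., 0::2] = x1 * cos - x2 * sin
    out[..., 1::2] = x1 * sin + x2 * cos
    return out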
| 58 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Any = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''conditional_detr'''
_lowerCamelCase = ['''past_key_values''']
_lowerCamelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _lowercase=True , _lowercase=None , _lowercase=3 , _lowercase=3_0_0 , _lowercase=6 , _lowercase=2_0_4_8 , _lowercase=8 , _lowercase=6 , _lowercase=2_0_4_8 , _lowercase=8 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=True , _lowercase="relu" , _lowercase=2_5_6 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1.0 , _lowercase=False , _lowercase="sine" , _lowercase="resnet50" , _lowercase=True , _lowercase=False , _lowercase=2 , _lowercase=5 , _lowercase=2 , _lowercase=1 , _lowercase=1 , _lowercase=2 , _lowercase=5 , _lowercase=2 , _lowercase=0.25 , **_lowercase , ) -> Tuple:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : Dict = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(_lowercase , _lowercase ):
snake_case_ : List[str] = backbone_config.get("""model_type""" )
snake_case_ : Tuple = CONFIG_MAPPING[backbone_model_type]
snake_case_ : Any = config_class.from_dict(_lowercase )
snake_case_ : Optional[int] = use_timm_backbone
snake_case_ : Dict = backbone_config
snake_case_ : List[str] = num_channels
snake_case_ : List[str] = num_queries
snake_case_ : List[Any] = d_model
snake_case_ : List[Any] = encoder_ffn_dim
snake_case_ : Union[str, Any] = encoder_layers
snake_case_ : List[str] = encoder_attention_heads
snake_case_ : str = decoder_ffn_dim
snake_case_ : Tuple = decoder_layers
snake_case_ : int = decoder_attention_heads
snake_case_ : str = dropout
snake_case_ : List[Any] = attention_dropout
snake_case_ : List[Any] = activation_dropout
snake_case_ : List[str] = activation_function
snake_case_ : Union[str, Any] = init_std
snake_case_ : Union[str, Any] = init_xavier_std
snake_case_ : int = encoder_layerdrop
snake_case_ : Union[str, Any] = decoder_layerdrop
snake_case_ : List[Any] = encoder_layers
snake_case_ : Tuple = auxiliary_loss
snake_case_ : Union[str, Any] = position_embedding_type
snake_case_ : Optional[int] = backbone
snake_case_ : Any = use_pretrained_backbone
snake_case_ : Optional[int] = dilation
# Hungarian matcher
snake_case_ : int = class_cost
snake_case_ : Optional[int] = bbox_cost
snake_case_ : str = giou_cost
# Loss coefficients
snake_case_ : str = mask_loss_coefficient
snake_case_ : Optional[int] = dice_loss_coefficient
snake_case_ : int = cls_loss_coefficient
snake_case_ : Any = bbox_loss_coefficient
snake_case_ : Any = giou_loss_coefficient
snake_case_ : int = focal_alpha
super().__init__(is_encoder_decoder=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return self.d_model
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
snake_case_ : Tuple = self.backbone_config.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return 1_2
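# Hypothetical usage sketch for the config above (the class names in this snippet are
# obfuscated, so `ConditionalDetrConfig` is an assumed alias): a plain-dict
# `backbone_config` is resolved through CONFIG_MAPPING by its "model_type" key when
# `use_timm_backbone` is False, and combining both options raises a ValueError.
# cfg = ConditionalDetrConfig(
#     use_timm_backbone=False,
#     backbone_config={"model_type": "resnet", "out_features": ["stage4"]},
# )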
| 58 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Dict = checkpoints.load_tax_checkpoint(__UpperCamelCase )
snake_case_ : Tuple = flatten_dict(__UpperCamelCase )
return flax_params
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : List[Any] = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
snake_case_ : Optional[Any] = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
snake_case_ : List[Any] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
snake_case_ : List[str] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
snake_case_ : Optional[int] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
snake_case_ : Optional[Any] = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Union[str, Any] = new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
snake_case_ : int = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Dict = flax_dict[key]
snake_case_ : Tuple = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
snake_case_ : Optional[int] = torch.from_numpy(converted_dict[key].T )
else:
snake_case_ : List[Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[str]=False ):
'''simple docstring'''
snake_case_ : Optional[int] = get_flax_param(__UpperCamelCase )
if not use_large:
snake_case_ : Optional[int] = PixaStructVisionConfig()
snake_case_ : Optional[Any] = PixaStructTextConfig()
else:
snake_case_ : Tuple = PixaStructVisionConfig(
hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
snake_case_ : List[str] = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
snake_case_ : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCamelCase )
snake_case_ : Optional[int] = PixaStructForConditionalGeneration(__UpperCamelCase )
snake_case_ : str = rename_and_convert_flax_params(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
snake_case_ : int = PixaStructImageProcessor()
snake_case_ : str = PixaStructProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
if use_large:
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : int = True
# mkdir if needed
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
print("""Model saved in {}""".format(__UpperCamelCase ) )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Whether the converted checkpoint is a VQA model.''')
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
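# Example invocation (a sketch; the script filename and paths are placeholders):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted \
#       --use_large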
| 58 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''data2vec-text'''
def __init__( self , _lowercase=3_0_5_2_2 , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase="absolute" , _lowercase=True , _lowercase=None , **_lowercase , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
snake_case_ : List[Any] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : Tuple = hidden_act
snake_case_ : str = intermediate_size
snake_case_ : Dict = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : str = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : Optional[Any] = layer_norm_eps
snake_case_ : Optional[int] = position_embedding_type
snake_case_ : List[Any] = use_cache
snake_case_ : Tuple = classifier_dropout
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Optional[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 58 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__UpperCamelCase ) * abs(__UpperCamelCase )
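# Worked example (not in the source): kinetic energy is 0.5 * m * v**2, so a 10 kg body
# moving at 5 m/s carries 0.5 * 10 * 5**2 = 125.0 J; a negative mass raises ValueError.
# >>> __lowerCAmelCase(10.0, 5.0)
# 125.0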
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 58 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowerCAmelCase : Any = logging.getLogger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''sequence-classification'''
def __init__( self , _lowercase ) -> str:
'''simple docstring'''
if type(_lowercase ) == dict:
snake_case_ : Optional[int] = Namespace(**_lowercase )
snake_case_ : str = glue_output_modes[hparams.task]
snake_case_ : List[Any] = glue_tasks_num_labels[hparams.task]
super().__init__(_lowercase , _lowercase , self.mode )
def UpperCAmelCase__ ( self , **_lowercase ) -> List[str]:
'''simple docstring'''
return self.model(**_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : str = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case_ : Dict = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
snake_case_ : Optional[Any] = self(**_lowercase )
snake_case_ : str = outputs[0]
snake_case_ : str = self.trainer.lr_schedulers[0]["""scheduler"""]
snake_case_ : str = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = self.hparams
snake_case_ : List[Any] = processors[args.task]()
snake_case_ : Any = processor.get_labels()
for mode in ["train", "dev"]:
snake_case_ : str = self._feature_file(_lowercase )
if os.path.exists(_lowercase ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , _lowercase )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
snake_case_ : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
snake_case_ : Dict = convert_examples_to_features(
_lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , _lowercase )
torch.save(_lowercase , _lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase = False ) -> DataLoader:
'''simple docstring'''
snake_case_ : Union[str, Any] = """dev""" if mode == """test""" else mode
snake_case_ : Dict = self._feature_file(_lowercase )
logger.info("""Loading features from cached file %s""" , _lowercase )
snake_case_ : Optional[int] = torch.load(_lowercase )
snake_case_ : Union[str, Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
snake_case_ : Optional[int] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
snake_case_ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
snake_case_ : Any = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
snake_case_ : Any = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(_lowercase , _lowercase , _lowercase , _lowercase ) , batch_size=_lowercase , shuffle=_lowercase , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case_ : Dict = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
snake_case_ : List[str] = self(**_lowercase )
snake_case_ , snake_case_ : Optional[int] = outputs[:2]
snake_case_ : Any = logits.detach().cpu().numpy()
snake_case_ : List[str] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCAmelCase__ ( self , _lowercase ) -> tuple:
'''simple docstring'''
snake_case_ : Tuple = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
snake_case_ : str = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
snake_case_ : Optional[Any] = np.argmax(_lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
snake_case_ : str = np.squeeze(_lowercase )
snake_case_ : Optional[int] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
snake_case_ : Any = [[] for _ in range(out_label_ids.shape[0] )]
snake_case_ : int = [[] for _ in range(out_label_ids.shape[0] )]
snake_case_ : List[str] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , _lowercase , _lowercase )}
snake_case_ : List[str] = dict(results.items() )
snake_case_ : Optional[Any] = results
return ret, preds_list, out_label_list
def UpperCAmelCase__ ( self , _lowercase ) -> dict:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : List[str] = self._eval_end(_lowercase )
snake_case_ : Optional[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCAmelCase__ ( self , _lowercase ) -> dict:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : Any = self._eval_end(_lowercase )
snake_case_ : Union[str, Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCAmelCase__ ( _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(_lowercase , _lowercase )
parser.add_argument(
"""--max_seq_length""" , default=1_2_8 , type=_lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=_lowercase , required=_lowercase , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=_lowercase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[Any] = argparse.ArgumentParser()
add_generic_args(__UpperCamelCase , os.getcwd() )
snake_case_ : str = GLUETransformer.add_model_specific_args(__UpperCamelCase , os.getcwd() )
snake_case_ : List[str] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
snake_case_ : Tuple = os.path.join(
"""./results""" , F'{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}' , )
os.makedirs(args.output_dir )
snake_case_ : Optional[Any] = GLUETransformer(__UpperCamelCase )
snake_case_ : Union[str, Any] = generic_train(__UpperCamelCase , __UpperCamelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
snake_case_ : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=__UpperCamelCase ) )
snake_case_ : List[str] = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__UpperCamelCase )
if __name__ == "__main__":
main()
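# Example invocation (a sketch; the script filename and the generic flags are assumed to
# come from lightning_base.add_generic_args / generic_train):
#   python run_pl_glue.py --model_name_or_path bert-base-cased --task mrpc \
#       --data_dir ./glue_data/MRPC --output_dir ./results/mrpc \
#       --max_seq_length 128 --gpus 1 --do_train --do_predict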
| 58 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionInpaintPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase = frozenset([] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
snake_case_ : Dict = PNDMScheduler(skip_prk_steps=_lowercase )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case_ : Dict = CLIPTextModel(_lowercase )
snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowercase ) ).to(_lowercase )
snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Any = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : str = torch.manual_seed(_lowercase )
else:
snake_case_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline(**_lowercase )
snake_case_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = self.get_dummy_inputs(_lowercase )
snake_case_ : List[str] = sd_pipe(**_lowercase ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Optional[int] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[str] = torch.manual_seed(0 )
snake_case_ : Dict = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , torch_dtype=torch.floataa , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case_ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : Optional[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , output_type="""np""" , )
snake_case_ : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
snake_case_ : List[str] = PNDMScheduler.from_pretrained(_lowercase , subfolder="""scheduler""" )
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , scheduler=_lowercase , torch_dtype=torch.floataa , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ : Any = """Face of a yellow cat, high resolution, sitting on a park bench"""
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Any = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
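# Usage distilled from the slow tests above (model id and call signature as exercised there):
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting")
#   result = pipe(prompt="Face of a yellow cat, high resolution, sitting on a park bench",
#                 image=init_image, mask_image=mask_image, output_type="np")
#   image = result.images[0]  # (512, 512, 3) ndarray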
| 58 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ShapEPipeline
_lowerCamelCase = ['''prompt''']
_lowerCamelCase = ['''prompt''']
_lowerCamelCase = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_lowerCamelCase = False
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 8
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Any = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 1_6,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 3_2,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
snake_case_ : Dict = PriorTransformer(**_lowercase )
return model
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 1_2,
"""background""": (
0.1,
0.1,
0.1,
),
}
snake_case_ : str = ShapERenderer(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : List[str] = self.dummy_prior
snake_case_ : Dict = self.dummy_text_encoder
snake_case_ : str = self.dummy_tokenizer
snake_case_ : List[Any] = self.dummy_renderer
snake_case_ : Optional[int] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_0_2_4 , prediction_type="""sample""" , use_karras_sigmas=_lowercase , clip_sample=_lowercase , clip_sample_range=1.0 , )
snake_case_ : Optional[int] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> List[Any]:
'''simple docstring'''
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : Tuple = torch.manual_seed(_lowercase )
else:
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : List[Any] = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 3_2,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : Union[str, Any] = self.get_dummy_components()
snake_case_ : List[str] = self.pipeline_class(**_lowercase )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : str = pipe(**self.get_dummy_inputs(_lowercase ) )
snake_case_ : List[Any] = output.images[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
snake_case_ : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = torch_device == """cpu"""
snake_case_ : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowercase , relax_max_difference=_lowercase , )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.get_dummy_components()
snake_case_ : Optional[Any] = self.pipeline_class(**_lowercase )
snake_case_ : Dict = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[Any] = 1
snake_case_ : str = 2
snake_case_ : int = self.get_dummy_inputs(_lowercase )
for key in inputs.keys():
if key in self.batch_params:
snake_case_ : int = batch_size * [inputs[key]]
snake_case_ : List[Any] = pipe(**_lowercase , num_images_per_prompt=_lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
snake_case_ : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
snake_case_ : str = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe(
"""a shark""" , generator=_lowercase , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type="""np""" , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
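# Usage distilled from the slow test above:
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e")
#   frames = pipe("a shark", guidance_scale=15.0, num_inference_steps=64,
#                 frame_size=64, output_type="np").images[0]  # (20, 64, 64, 3)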
| 58 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
snake_case_ : Optional[int] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case_ : Optional[Any] = F'{src_lang}-{tgt_lang}'
snake_case_ : Dict = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
snake_case_ : List[str] = os.path.join(__UpperCamelCase , """README.md""" )
print(F'Generating {path}' )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__lowerCAmelCase : str = Path(__file__).resolve().parent.parent.parent
__lowerCAmelCase : Optional[int] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = model_name.split('''-''')
__lowerCAmelCase : Optional[int] = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
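# For each pair this writes model_cards/facebook/<model_name>/README.md under the
# repository root resolved three levels above this script.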
| 58 | 1 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _lowerCAmelCase :
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.get_dummy_input()
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
def UpperCAmelCase__ ( self , _lowercase=True , _lowercase=False , _lowercase=False , _lowercase=False , ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = 4
snake_case_ : Optional[Any] = 3_2
snake_case_ : int = (3_2, 3_2)
snake_case_ : List[Any] = torch.manual_seed(0 )
snake_case_ : Union[str, Any] = torch.device(_lowercase )
snake_case_ : Union[str, Any] = (batch_size, num_channels) + sizes
snake_case_ : List[Any] = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase )
snake_case_ : str = {"""hidden_states""": hidden_states}
if include_temb:
snake_case_ : Union[str, Any] = 1_2_8
snake_case_ : Dict = randn_tensor((batch_size, temb_channels) , generator=_lowercase , device=_lowercase )
if include_res_hidden_states_tuple:
snake_case_ : Any = torch.manual_seed(1 )
snake_case_ : Tuple = (randn_tensor(_lowercase , generator=_lowercase , device=_lowercase ),)
if include_encoder_hidden_states:
snake_case_ : Optional[Any] = floats_tensor((batch_size, 3_2, 3_2) ).to(_lowercase )
if include_skip_sample:
snake_case_ : List[str] = randn_tensor(((batch_size, 3) + sizes) , generator=_lowercase , device=_lowercase )
return dummy_input
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = {
"""in_channels""": 3_2,
"""out_channels""": 3_2,
"""temb_channels""": 1_2_8,
}
if self.block_type == "up":
snake_case_ : Optional[int] = 3_2
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
snake_case_ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self , _lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ , snake_case_ : str = self.prepare_init_args_and_inputs_for_common()
snake_case_ : int = self.block_class(**_lowercase )
unet_block.to(_lowercase )
unet_block.eval()
with torch.no_grad():
snake_case_ : Dict = unet_block(**_lowercase )
if isinstance(_lowercase , _lowercase ):
snake_case_ : Dict = output[0]
self.assertEqual(output.shape , self.output_shape )
snake_case_ : int = output[0, -1, -3:, -3:]
snake_case_ : Dict = torch.tensor(_lowercase ).to(_lowercase )
assert torch_all_close(output_slice.flatten() , _lowercase , atol=5E-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ , snake_case_ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
snake_case_ : Union[str, Any] = self.block_class(**_lowercase )
model.to(_lowercase )
model.train()
snake_case_ : Optional[int] = model(**_lowercase )
if isinstance(_lowercase , _lowercase ):
snake_case_ : List[str] = output[0]
snake_case_ : int = torch.device(_lowercase )
snake_case_ : Dict = randn_tensor(output.shape , device=_lowercase )
snake_case_ : Optional[Any] = torch.nn.functional.mse_loss(_lowercase , _lowercase )
loss.backward()
| 58 |
"""simple docstring"""
__lowerCAmelCase : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following command.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCAmelCase : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCAmelCase : Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : List[str] = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
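# Note: with _LazyModule, `from <package>.roformer import RoFormerModel` resolves the heavy
# torch/TF/Flax submodules only on first attribute access, so the guarded try/except blocks
# above decide what the lazy module may expose without importing any backend eagerly.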
| 58 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
__lowerCAmelCase : Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCAmelCase : Union[str, Any] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, so further work is required to identify the main source(s) of error and to focus the research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. The relationship between the two has also been examined through a power-law theory that links perplexity to word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
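# A minimal sketch of the formula above (an illustration only, not part of the metric,
# which relies on jiwer.compute_measures below): word-level Levenshtein distance counts
# S + D + I, and N is the reference length, so WER = distance / N.
def _wer_sketch(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[j] holds the edit distance between ref[:i] and hyp[:j] (single-row DP)
    dp = list(range(len(hyp) + 1))
    for i in range(1, len(ref) + 1):
        prev_diag, dp[0] = dp[0], i
        for j in range(1, len(hyp) + 1):
            cur = dp[j]
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[j] = min(dp[j] + 1,          # deletion
                        dp[j - 1] + 1,      # insertion
                        prev_diag + cost)   # substitution (or match)
            prev_diag = cur
    return dp[-1] / len(ref)

# _wer_sketch("this is the reference", "this is the prediction")  # -> 0.25 (one substitution)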
__lowerCAmelCase : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=False ) -> Optional[Any]:
'''simple docstring'''
if concatenate_texts:
return compute_measures(_lowercase , _lowercase )["wer"]
else:
snake_case_ : List[str] = 0
snake_case_ : Optional[int] = 0
for prediction, reference in zip(_lowercase , _lowercase ):
snake_case_ : Optional[Any] = compute_measures(_lowercase , _lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 58 | 1 |
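The `0.5` in the docstring example follows directly from the formula in the description. The first pair differs by a single substitution (`prediction` for `reference`: S=1, D=I=0, N=4); for the second pair one plausible word-level alignment gives two substitutions plus one insertion (S=2, I=1, D=0, N=4), so WER = (1 + 3) / (4 + 4) = 0.5. A standalone sketch of the iterative branch above, using the same `compute_measures` keys that older jiwer releases expose (newer jiwer versions replace this function with `process_words`):

```python
from jiwer import compute_measures

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

incorrect, total = 0, 0
for prediction, reference in zip(predictions, references):
    # Reference (truth) comes first, hypothesis second.
    measures = compute_measures(reference, prediction)
    incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total += measures["substitutions"] + measures["deletions"] + measures["hits"]

print(incorrect / total)  # 0.5
```

Accumulating error and reference-word counts across all pairs before dividing weights every reference word equally; averaging per-sentence WERs instead would overweight short references.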
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''microsoft/speecht5_tts'''
_lowerCamelCase = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
_lowerCamelCase = '''text_reader'''
_lowerCamelCase = SpeechTaProcessor
_lowerCamelCase = SpeechTaForTextToSpeech
_lowerCamelCase = SpeechTaHifiGan
_lowerCamelCase = ['''text''']
_lowerCamelCase = ['''audio''']
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
if self.post_processor is None:
snake_case_ : str = """microsoft/speecht5_hifigan"""
super().setup()
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = self.pre_processor(text=_lowercase , return_tensors="""pt""" , truncation=_lowercase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
snake_case_ : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
snake_case_ : Union[str, Any] = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase__ ( self , _lowercase ) -> Any:
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
with torch.no_grad():
return self.post_processor(_lowercase ).cpu().detach()
| 58 |
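Stripped of the tool plumbing, the record implements encode, generate, then vocode. The same flow can be driven with the transformers classes directly; a sketch assuming a recent transformers release (where `generate_speech` accepts the vocoder inline, rather than applying it as a separate post-processing step the way the tool's decode method does) and network access to the named checkpoints:

```python
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, this is a test.", return_tensors="pt")

# The same xvector dataset the tool falls back to when no embedding is passed.
embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings[7305]["xvector"]).unsqueeze(0)

with torch.no_grad():
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
print(speech.shape)  # 1-D waveform tensor at 16 kHz
```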
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=3 , _lowercase=2_2_4 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : Union[str, Any] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : Optional[Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Union[str, Any] = do_resize
snake_case_ : Optional[int] = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : Dict = image_std
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 58 | 1 |
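All three tests above assert one contract: whatever the input type (PIL, NumPy, or torch), the processor resizes each image to `size` and returns a `(batch, channels, height, width)` tensor. A standalone sketch of that contract with the tester's default 18x18 size, assuming torch and Pillow are installed:

```python
import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

image_processor = ViTImageProcessor(size={"height": 18, "width": 18})

# One PIL image in, a batch of one out.
image = Image.fromarray(np.random.randint(0, 256, (40, 30, 3), dtype=np.uint8))
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])

# A list of images is stacked along the batch dimension.
print(image_processor([image] * 4, return_tensors="pt").pixel_values.shape)
# torch.Size([4, 3, 18, 18])
```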
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58 |
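The record above contains no logic of its own: every name now lives in `transformers.utils`, and `file_utils` survives purely so that pre-reorganization import paths keep resolving. A two-line illustration of what the shim guarantees, assuming a transformers version that still ships it:

```python
# Old and new import paths resolve to the very same function object.
from transformers.file_utils import is_torch_available as via_old_path
from transformers.utils import is_torch_available as via_new_path

assert via_old_path is via_new_path
```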
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
snake_case_ : List[Any] = max((len(str(_lowercase ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
snake_case_ : str = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Optional[Any] = node.forward[0]
snake_case_ : int = node.value
if len(__UpperCamelCase ) != 4:
print()
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = SkipList()
assert skip_list.find("""Some key""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(__UpperCamelCase , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(__UpperCamelCase , __UpperCamelCase )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 58 | 1 |
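Taken together, the record's tests pin down the structure's contract: iteration yields keys in sorted order, duplicate inserts overwrite, deletes unlink without leaving dead references, and `random_level` draws node heights from a geometric distribution (with the default p = 0.5, about half the nodes stay on a single level), which is what makes search expected O(log n). A usage sketch written against the original class name `SkipList`; the definition above was mechanically renamed, so this is illustrative rather than copy-paste runnable:

```python
skip_list = SkipList()  # defaults: p=0.5, max_level=16
for key, value in [(3, "three"), (1, "one"), (2, "two"), (2, "TWO")]:
    skip_list.insert(key, value)

print(list(skip_list))    # [1, 2, 3], iteration walks the bottom level in key order
print(skip_list.find(2))  # 'TWO', the second insert overwrote the first
skip_list.delete(1)
print(skip_list.find(1))  # None
```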
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : List[str] = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ['''MobileViTFeatureExtractor''']
__lowerCAmelCase : Optional[int] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 |
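What keeps this `__init__.py` cheap to import is the closing `_LazyModule` assignment: at runtime the module body only registers names, and the torch/TF submodules are imported the first time one of their classes is actually accessed. A much-simplified sketch of the idea (not transformers' actual `_LazyModule` implementation):

```python
import importlib
from types import ModuleType


class LazyModule(ModuleType):
    """Defers submodule imports until an exported name is first accessed."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]) -> None:
        super().__init__(name)
        # Invert {submodule: [symbols]} into {symbol: submodule}.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, name: str):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(
            "." + self._symbol_to_module[name], self.__name__
        )
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so later lookups bypass __getattr__
        return value
```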
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[Any] = '''examples/'''
__lowerCAmelCase : Union[str, Any] = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__lowerCAmelCase : Union[str, Any] = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__lowerCAmelCase : List[Any] = '''README.md'''
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : Any = f.read()
snake_case_ , snake_case_ : Optional[int] = REPLACE_PATTERNS[pattern]
snake_case_ : Union[str, Any] = replace.replace("""VERSION""" , __UpperCamelCase )
snake_case_ : List[Any] = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="""examples""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : int=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = """🤗 Transformers currently provides the following architectures"""
snake_case_ : Union[str, Any] = """1. Want to contribute a new model?"""
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : str = f.readlines()
# Find the start of the list.
snake_case_ : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case_ : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
snake_case_ : Any = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
snake_case_ : Any = f.read()
snake_case_ : Tuple = REPLACE_PATTERNS["""init"""][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str=False ):
'''simple docstring'''
snake_case_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
snake_case_ : str = default_version.base_version
elif patch:
snake_case_ : str = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
snake_case_ : str = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
snake_case_ : int = input(F'Which version are you releasing? [{default_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Optional[int] = default_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = get_version()
snake_case_ : str = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
snake_case_ : Tuple = current_version.base_version
# Check with the user we got that right.
snake_case_ : Optional[int] = input(F'Which version are we developing now? [{dev_version}]' )
if len(__UpperCamelCase ) == 0:
snake_case_ : Dict = dev_version
print(F'Updating version to {version}.' )
global_version_update(__UpperCamelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__lowerCAmelCase : str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 58 | 1 |
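Everything in the script funnels through the `REPLACE_PATTERNS` regexes: `get_version` reads the current version out of `src/diffusers/__init__.py` with the `init` pattern, and `update_version_in_file` writes the bumped one back via `re.sub`. A self-contained demonstration of that round trip (the file contents below are made up for illustration):

```python
import re

import packaging.version

init_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
source = 'name = "diffusers"\n__version__ = "0.19.0.dev0"\n'

current = init_pattern.search(source).groups()[0]
print(current)  # 0.19.0.dev0

# A dev version is released by dropping its .dev0 suffix (base_version),
# mirroring what pre_release_work does before prompting for confirmation.
release = packaging.version.parse(current).base_version  # '0.19.0'

# Written back exactly the way update_version_in_file substitutes it.
updated = init_pattern.sub(f'__version__ = "{release}"', source)
print(updated)
```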