| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclid: return (x, y) such that a*x + b*y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x % n1 == r1 and x % n2 == r2 for coprime n1, n2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, built on invert_modulo.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
| 222 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , A_ , A_="<unk>" , A_="<s>" , A_="</s>" , A_="<pad>" , A_="[SEP]" , A_="[MASK]" , A_="[CLS]" , A_ = None , **A_ , ) -> None:
"""simple docstring"""
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sep_token=A_ , mask_token=A_ , cls_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return self.sp_model.get_piece_size()
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A_ , out_type=A_ )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.piece_to_id(A_ )
def __UpperCamelCase ( self , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.sp_model.IdToPiece(A_ )
return token
def __UpperCamelCase ( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = ''
UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
UpperCamelCase = True
UpperCamelCase = []
else:
current_sub_tokens.append(A_ )
UpperCamelCase = False
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def __UpperCamelCase ( self , A_ , A_ = False , A_ = None , A_ = True , **A_ , ) -> str:
"""simple docstring"""
UpperCamelCase = kwargs.pop('use_source_tokenizer' , A_ )
UpperCamelCase = self.convert_ids_to_tokens(A_ , skip_special_tokens=A_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A_ ) )
UpperCamelCase = []
sub_texts.append(A_ )
else:
current_sub_text.append(A_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A_ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
UpperCamelCase = re.sub(r' (\[(MASK|SEP)\])' , r'\1' , ' '.join(A_ ) )
else:
UpperCamelCase = ''.join(A_ )
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(A_ )
return clean_text
else:
return text
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 222 | 1 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[str] = "T5Config"
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'mt5'
_a = MTaConfig
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'mt5'
_a = MTaConfig
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'mt5'
_a = MTaConfig
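These wrappers only swap in MT5Config, so loading mirrors the T5 classes; a brief hedged sketch (checkpoint name is illustrative, standard transformers API):

# from transformers import AutoTokenizer, TFMT5ForConditionalGeneration
# tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
# model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")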
| 272 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : Union[str, Any] )-> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case ( self : str )-> Any:
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''', from_pt=True, dtype=jnp.bfloat16 )
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 )
params['''controlnet'''] = controlnet_params
lowerCamelCase__ : Dict ='''bird'''
lowerCamelCase__ : List[str] =jax.device_count()
lowerCamelCase__ : Optional[Any] =pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Dict =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
lowerCamelCase__ : List[Any] =pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCamelCase__ : Optional[int] =jax.random.PRNGKey(0 )
lowerCamelCase__ : Dict =jax.random.split(lowerCamelCase, jax.device_count() )
lowerCamelCase__ : Tuple =replicate(lowerCamelCase )
lowerCamelCase__ : Tuple =shard(lowerCamelCase )
lowerCamelCase__ : Optional[int] =shard(lowerCamelCase )
lowerCamelCase__ : Tuple =pipe(
prompt_ids=lowerCamelCase, image=lowerCamelCase, params=lowerCamelCase, prng_seed=lowerCamelCase, num_inference_steps=50, jit=lowerCamelCase, ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCamelCase__ : Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ : Any =images[0, 253:256, 253:256, -1]
lowerCamelCase__ : Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ : Dict =jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def snake_case ( self : Optional[int] )-> Optional[int]:
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''', from_pt=True, dtype=jnp.bfloat16 )
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 )
params['''controlnet'''] = controlnet_params
lowerCamelCase__ : int ='''Chef in the kitchen'''
lowerCamelCase__ : Optional[Any] =jax.device_count()
lowerCamelCase__ : Any =pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Any =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
lowerCamelCase__ : List[Any] =pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCamelCase__ : Tuple =jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] =jax.random.split(lowerCamelCase, jax.device_count() )
lowerCamelCase__ : int =replicate(lowerCamelCase )
lowerCamelCase__ : List[Any] =shard(lowerCamelCase )
lowerCamelCase__ : int =shard(lowerCamelCase )
lowerCamelCase__ : Tuple =pipe(
prompt_ids=lowerCamelCase, image=lowerCamelCase, params=lowerCamelCase, prng_seed=lowerCamelCase, num_inference_steps=50, jit=lowerCamelCase, ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCamelCase__ : Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ : List[str] =images[0, 253:256, 253:256, -1]
lowerCamelCase__ : int =jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ : Any =jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 272 | 1 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: return the coins used, largest denominations first."""
    total_value = int(value)
    # Initialize result
    answer = []
    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations):
        # Take as many coins of this denomination as fit
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (y/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # Default to Indian currency denominations if the user does not enter any
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for coin in answer:
            print(coin, end=" ")
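A non-interactive example of the greedy routine above (greedy is optimal for canonical coin systems like INR, though not for arbitrary denominations):

print(find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987"))
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]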
| 211 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 211 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }
        config.update(**kwargs)
        return config
def __A ( self : Tuple ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def __A ( self : List[Any] ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__A )
def __A ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def __A ( self : Dict ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__A )
def __A ( self : int ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__A )
def __A ( self : int ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__A , prev_timestep=__A )
def __A ( self : int ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(variance_type="fixed_small_log" )
A_ = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5
def __A ( self : int ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(variance_type="learned_range" )
A_ = scheduler_class(**__A )
A_ = 0.5
assert scheduler._get_variance(1 , predicted_variance=__A ) - -10.1712790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=__A ) - -5.7_998_052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=__A ) - -0.0_010_011 < 1E-5
def __A ( self : List[str] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**__A )
A_ = scheduler.timesteps
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
A_ = model(__A , __A )
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(__A , __A , __A , generator=__A ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(__A ) )
A_ = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 252.2682495 ) < 1E-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**__A )
scheduler.set_timesteps(25 )
A_ = scheduler.timesteps
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
A_ = model(__A , __A )
if i + 1 == timesteps.shape[0]:
A_ = None
else:
A_ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(
__A , __A , __A , prev_timestep=__A , generator=__A ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(__A ) )
A_ = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 258.2044983 ) < 1E-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
def __A ( self : List[str] ):
pass
def __A ( self : List[Any] ):
pass
| 361 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """Check whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode blocks:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
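A quick check of the codepoint test above:

print(_is_chinese_char(ord("中")))  # True: U+4E2D falls in the 0x4E00-0x9FFF block
print(_is_chinese_char(ord("a")))   # False: ASCII is outside every CJK range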
def is_chinese(word: str) -> int:
    """Return 1 if every character in the word is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = set()
for token in tokens:
A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase )
if chinese_word:
word_set.add(__UpperCamelCase )
A_ = list(__UpperCamelCase )
return word_list
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(__UpperCamelCase )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start ,__UpperCamelCase )
for i in range(__UpperCamelCase ,1 ,-1 ):
A_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 ,start + i ):
A_ = "##" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ):
"""simple docstring"""
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0]
A_ = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase )
A_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__a :Dict = parser.parse_args()
main(args)
| 329 | 0 |
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration the given number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment with four: the middle third is bent outward by 60 degrees."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of the snowflake
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
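A quick property check on the construction above: every iteration replaces each segment with four, so k steps turn the initial 3 segments into 3 * 4**k segments (one more point than segments):

assert len(iterate(INITIAL_VECTORS, 2)) == 3 * 4**2 + 1  # 49 points after two iterations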
| 83 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 0 |
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Reopen the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
| 357 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_A : int =imread(r'''digital_image_processing/image_data/lena_small.jpg''')
_A : Optional[Any] =cvtColor(img, COLOR_BGR2GRAY)
def SCREAMING_SNAKE_CASE_ () -> Any:
lowerCamelCase__ : int = cn.convert_to_negative(UpperCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCamelCase , 110 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]:
lowerCamelCase__ : Optional[int] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def SCREAMING_SNAKE_CASE_ () -> str:
lowerCamelCase__ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase__ : Union[str, Any] = canny.canny(UpperCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def SCREAMING_SNAKE_CASE_ () -> str:
assert gg.gaussian_filter(UpperCamelCase , 5 , sigma=0.9 ).all()
def SCREAMING_SNAKE_CASE_ () -> int:
# laplace diagonals
lowerCamelCase__ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCamelCase__ : int = conv.img_convolve(UpperCamelCase , UpperCamelCase ).astype(UpperCamelCase )
assert res.any()
def SCREAMING_SNAKE_CASE_ () -> Any:
assert med.median_filter(UpperCamelCase , 3 ).any()
def SCREAMING_SNAKE_CASE_ () -> Any:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = sob.sobel_filter(UpperCamelCase )
assert grad.any() and theta.any()
def SCREAMING_SNAKE_CASE_ () -> Tuple:
lowerCamelCase__ : Union[str, Any] = sp.make_sepia(UpperCamelCase , 20 )
assert sepia.all()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = bs.Burkes(imread(UpperCamelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[Any]:
lowerCamelCase__ : int = rs.NearestNeighbour(imread(UpperCamelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
lowerCamelCase__ : Union[str, Any] = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
lowerCamelCase__ : Tuple = imread(UpperCamelCase , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCamelCase__ : int = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : Optional[Any] = image[x_coordinate][y_coordinate]
lowerCamelCase__ : str = lbp.get_neighbors_pixel(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCamelCase__ : List[str] = lbp.local_binary_value(UpperCamelCase , UpperCamelCase , UpperCamelCase )
assert lbp_image.any()
| 129 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
lowercase__ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
lowercase__ = typing.Union[np.floataa, int, float] # noqa: UP007
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
return np.sqrt(np.sum((np.asarray(lowercase__ ) - np.asarray(lowercase__ )) ** 2 ) )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
return sum((va - va) ** 2 for va, va in zip(lowercase__ , lowercase__ ) ) ** (1 / 2)
if __name__ == "__main__":
def __a ( ) ->Tuple:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
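Both variants compute the same value; a quick equivalence check (illustrative):

import math

assert math.isclose(
    float(euclidean_distance([1, 2, 3], [4, 5, 6])),
    euclidean_distance_no_np([1, 2, 3], [4, 5, 6]),
)  # both give sqrt(27), roughly 5.196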
| 290 |
"""simple docstring"""
import math
def _snake_case ( lowercase__ ):
return math.sqrt(lowercase__ ) * math.sqrt(lowercase__ ) == num
def _snake_case ( lowercase__ ):
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[Any] = n
while left <= right:
_lowerCamelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_lowerCamelCase : str = mid - 1
else:
_lowerCamelCase : Optional[int] = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
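The float-based check can drift for very large integers, which is what the binary-search variant avoids; a quick illustration:

print(perfect_square(9))                      # True
print(perfect_square_binary_search(10**18))   # True: (10**9)**2, no float rounding involved
print(perfect_square_binary_search(26))       # False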
| 96 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_50, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_00, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_00, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=A__ , )
assert hasattr(self , """env""" )
def __A ( self , A__ ):
A__ : str = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
A__ : Tuple = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=A__ , instance_count=A__ , instance_type=self.instance_type , debugger_hook_config=A__ , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=A__ , py_version="""py36""" , )
def __A ( self , A__ ):
TrainingJobAnalytics(A__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self , A__ ):
# create estimator
A__ : Any = self.create_estimator(A__ )
# run training
estimator.fit()
# result dataframe
A__ : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
A__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
A__ : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
A__ : Optional[int] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , A__ )
| 141 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class _a :
'''simple docstring'''
def __init__( self , A__=None , **A__ ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
A__ : Dict = model
A__ : Any = kwargs.get("""model_save_dir""" , A__ )
A__ : Optional[int] = kwargs.get("""latest_model_name""" , A__ )
def __call__( self , **A__ ):
A__ : int = {k: np.array(A__ ) for k, v in kwargs.items()}
return self.model.run(A__ , A__ )
@staticmethod
def __A ( A__ , A__=None , A__=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
A__ : List[Any] = """CPUExecutionProvider"""
return ort.InferenceSession(A__ , providers=[provider] , sess_options=A__ )
def __A ( self , A__ , A__ = None , **A__ ):
A__ : List[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
A__ : List[Any] = self.model_save_dir.joinpath(self.latest_model_name )
A__ : Optional[int] = Path(A__ ).joinpath(A__ )
try:
shutil.copyfile(A__ , A__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
A__ : str = self.model_save_dir.joinpath(A__ )
if src_path.exists():
A__ : List[str] = Path(A__ ).joinpath(A__ )
try:
shutil.copyfile(A__ , A__ )
except shutil.SameFileError:
pass
def __A ( self , A__ , **A__ , ):
if os.path.isfile(A__ ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(A__ , exist_ok=A__ )
# saving model weights/files
self._save_pretrained(A__ , **A__ )
@classmethod
def __A ( cls , A__ , A__ = None , A__ = None , A__ = False , A__ = None , A__ = None , A__ = None , A__ = None , **A__ , ):
A__ : str = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(A__ ):
A__ : Dict = OnnxRuntimeModel.load_model(
os.path.join(A__ , A__ ) , provider=A__ , sess_options=A__ )
A__ : Optional[Any] = Path(A__ )
# load model from hub
else:
# download model
A__ : Union[str, Any] = hf_hub_download(
repo_id=A__ , filename=A__ , use_auth_token=A__ , revision=A__ , cache_dir=A__ , force_download=A__ , )
A__ : List[str] = Path(A__ ).parent
A__ : str = Path(A__ ).name
A__ : Optional[int] = OnnxRuntimeModel.load_model(A__ , provider=A__ , sess_options=A__ )
return cls(model=A__ , **A__ )
@classmethod
def __A ( cls , A__ , A__ = True , A__ = None , A__ = None , **A__ , ):
A__ : Optional[Any] = None
if len(str(A__ ).split("""@""" ) ) == 2:
A__ , A__ : Union[str, Any] = model_id.split("""@""" )
return cls._from_pretrained(
model_id=A__ , revision=A__ , cache_dir=A__ , force_download=A__ , use_auth_token=A__ , **A__ , )
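The methods above are diffusers' OnnxRuntimeModel API (method names are mangled in this dump); a hedged usage sketch, with a hypothetical repo id:

# import numpy as np
# model = OnnxRuntimeModel.from_pretrained("someorg/some-onnx-unet")       # hypothetical repo id
# out = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))           # kwargs become np.arrays fed to ORT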
| 141 | 1 |
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
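For completeness, a minimal matching server sketch (hypothetical, not part of the original module); it accepts one connection and streams a file back in 1 KiB chunks:

import socket

def serve(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # the client's "Hello server!" greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()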
| 184 |
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the word's letters sorted: the canonical key shared by all its anagrams."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word from the word list that is an anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
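A hedged usage sketch (actual output depends on the bundled words.txt):

# print(anagram("east"))  # e.g. ['east', 'eats', 'etas', 'sate', 'seat', 'teas']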
| 285 | 0 |
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Graph vertex for Prim's algorithm."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a plain list as the priority queue (O(V^2))."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap (re-heapified after each key decrease)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Doctests for prim() and prim_heap() (elided in this dump)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
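A minimal sketch of the API above: a 3-vertex triangle whose MST keeps the two cheapest edges (connect() takes 1-based vertex indices):

graph = [Vertex(i) for i in range(3)]
connect(graph, 1, 2, 1)
connect(graph, 2, 3, 2)
connect(graph, 1, 3, 3)
print(prim(graph, graph[0]))  # [(2, 1), (3, 2)]: vertex 2 attaches to 1, vertex 3 to 2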
| 249 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XGLMConfig
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = """gelu"""
def __init__( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any]=1_4 , UpperCamelCase__ : int=7 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=9_9 , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : str=3_7 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Union[str, Any]=5_1_2 , UpperCamelCase__ : Optional[Any]=0.0_2 , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = d_model
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = ffn_dim
UpperCamelCase = activation_function
UpperCamelCase = activation_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = max_position_embeddings
UpperCamelCase = initializer_range
UpperCamelCase = None
UpperCamelCase = 0
UpperCamelCase = 2
UpperCamelCase = 1
def A ( self : Union[str, Any] ):
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = self.get_config()
UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def A ( self : Union[str, Any] ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCamelCase__ , )
def A ( self : Tuple ):
"""simple docstring"""
config, input_ids, input_mask, head_mask = self.prepare_config_and_inputs()
inputs_dict = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE = (TFXGLMForCausalLM,) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = TFXGLMModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , n_embd=3_7 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def A ( self : List[str] ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TFXGLMModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def A ( self : Dict ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Optional[int] , UpperCamelCase__ : Tuple=True ):
"""simple docstring"""
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
UpperCamelCase = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase__ )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
UpperCamelCase = tokenizer('Today is a nice day and' , return_tensors='tf' )
UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
UpperCamelCase = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , seed=[7, 0] )
UpperCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
UpperCamelCase = 'left'
# use different length sentences to test batching
UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
UpperCamelCase = tokenizer(UpperCamelCase__ , return_tensors='tf' , padding=UpperCamelCase__ )
UpperCamelCase = inputs['input_ids']
UpperCamelCase = model.generate(input_ids=UpperCamelCase__ , attention_mask=inputs['attention_mask'] , max_new_tokens=1_2 )
UpperCamelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
UpperCamelCase = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=1_2 )
UpperCamelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
UpperCamelCase = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=1_2 )
UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase__ )
UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , [non_padded_sentence, padded_sentence] )
| 249 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}), supervised_keys=None)
    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]
    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}), supervised_keys=None)
    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]
    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 43
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 4
| 0
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab, merges, max_length=None, pad_token_id=None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length=None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 22
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
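# Added note (not part of the original file): with this lazy-import pattern,
# names listed in _import_structure (e.g. EncodecConfig) resolve without eagerly
# importing heavy dependencies; the torch-backed EncodecModel is only imported
# on first attribute access through the _LazyModule placed in sys.modules.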
| 22
| 1
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
snake_case : Tuple = "__DUMMY_TRANSFORMERS_USER__"
snake_case : List[Any] = "Dummy User"
snake_case : int = "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"
snake_case : int = "https://hub-ci.huggingface.co"
snake_case : Tuple = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
snake_case : Tuple = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
snake_case : Union[str, Any] = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def lowerCAmelCase_ ( _snake_case : Dict ) -> Any:
'''simple docstring'''
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _snake_case )
@pytest.fixture
def lowerCAmelCase_ ( _snake_case : int ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _snake_case )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _snake_case )
@pytest.fixture
def lowerCAmelCase_ ( _snake_case : Dict ) -> int:
'''simple docstring'''
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _snake_case )
@pytest.fixture
def lowerCAmelCase_ ( _snake_case : int , _snake_case : str ) -> Optional[int]:
'''simple docstring'''
HfFolder.save_token(_snake_case )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
return HfApi(endpoint=_snake_case )
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : HfApi ) -> Any:
'''simple docstring'''
__magic_name__ : List[Any] = HfFolder.get_token()
HfFolder.save_token(_snake_case )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_snake_case )
@pytest.fixture
def lowerCAmelCase_ ( _snake_case : Union[str, Any] ) -> Any:
'''simple docstring'''
def _cleanup_repo(_snake_case : Optional[Any] ):
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def lowerCAmelCase_ ( _snake_case : Any ) -> Optional[Any]:
'''simple docstring'''
@contextmanager
def _temporary_repo(_snake_case : Optional[Any] ):
try:
yield repo_id
finally:
cleanup_repo(_snake_case )
return _temporary_repo
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : HfApi , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__magic_name__ : Union[str, Any] = F'''repo_txt_data-{int(time.time() * 10E3 )}'''
__magic_name__ : List[Any] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_snake_case , token=_snake_case , repo_type="dataset" , private=_snake_case )
hf_api.upload_file(
token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo="data/text_data.txt" , repo_id=_snake_case , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : List[Any] ) -> Any:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : HfApi , _snake_case : Union[str, Any] , _snake_case : Optional[Any] ) -> int:
'''simple docstring'''
__magic_name__ : Union[str, Any] = F'''repo_zipped_txt_data-{int(time.time() * 10E3 )}'''
__magic_name__ : List[str] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_snake_case , token=_snake_case , repo_type="dataset" , private=_snake_case )
hf_api.upload_file(
token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo="data.zip" , repo_id=_snake_case , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : int ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : HfApi , _snake_case : Tuple , _snake_case : Optional[int] ) -> int:
'''simple docstring'''
__magic_name__ : int = F'''repo_zipped_img_data-{int(time.time() * 10E3 )}'''
__magic_name__ : int = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_snake_case , token=_snake_case , repo_type="dataset" , private=_snake_case )
hf_api.upload_file(
token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo="data.zip" , repo_id=_snake_case , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : int , _snake_case : Optional[Any] ) -> int:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 281
|
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its positive root is sqrt(a)."""
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x
def get_initial_point(a: float) -> float:
    """Pick a starting point >= sqrt(a) by repeated squaring."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001) -> float:
    """Approximate sqrt(a) with Newton's method."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
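# Added illustration (not part of the original file): a quick sanity check of
# the Newton iteration above; the inline values are the mathematically expected
# square roots, not output captured from the original file.
if __name__ == "__main__":
    print(square_root_iterative(4))  # 2.0
    print(square_root_iterative(3.2))  # ~1.7888543819998317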
| 281
| 1
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sort a[start:end + 1] in place and return the number of comparisons."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """Partition around a random pivot; return (pivot index, comparison count)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution '''
    '''is :'''
)
print(z)
| 350
|
"""simple docstring"""
def is_balanced(s):
    """Return True if every bracket in the string is properly matched."""
    stack = []
    open_brackets = set({"""(""", """[""", """{"""})
    closed_brackets = set({""")""", """]""", """}"""})
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0
def main():
    s = input("""Enter sequence of brackets: """)
    if is_balanced(s):
        print(s, """is balanced""")
    else:
        print(s, """is not balanced""")
if __name__ == "__main__":
main()
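# Added illustration (not part of the original file): expected behaviour of
# is_balanced on a few inputs.
#   is_balanced("([]{})")  -> True
#   is_balanced("[(])")    -> False
#   is_balanced("")        -> True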
| 132
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Prepare a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 36
|
from __future__ import annotations
import numpy as np
def relu(vector):
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 303
| 0
|
"""simple docstring"""
def bfs(graph, s, t, parent):
    '''Breadth-first search for an augmenting path from s to t.'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    '''Edmonds-Karp style implementation of the Ford-Fulkerson max-flow method.'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('''Inf''')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
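# Added note (not part of the original file): for the classic CLRS flow network
# defined above, the maximum flow from source 0 to sink 5 is 23, so this script
# prints 23.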
| 175
|
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        logger.info('''initializing retrieval''')
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''')
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend='''gloo''')
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('''dist not initialized / main''')
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('''e''')), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 175
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 105
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"""facebook/bart-base""": BartForConditionalGeneration}
tokenizer_dict = {"""facebook/bart-base""": BartTokenizer}
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""")
    parser.add_argument(
        """--validation_file""", type=str, default=None, help="""A csv or a json file containing the validation data.""")
    parser.add_argument(
        """--max_length""", type=int, default=5, help="""The maximum total input sequence length after tokenization.""", )
    parser.add_argument(
        """--num_beams""", type=int, default=None, help=(
            """Number of beams to use for evaluation. This argument will be """
            """passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
        ), )
    parser.add_argument(
        """--model_name_or_path""", type=str, help="""Path to pretrained model or model identifier from huggingface.co/models.""", required=True, )
    parser.add_argument(
        """--config_name""", type=str, default=None, help="""Pretrained config name or path if not the same as model_name""", )
    parser.add_argument(
        """--device""", type=str, default="""cpu""", help="""Device where the model will be run""", )
    parser.add_argument("""--output_file_path""", type=str, default=None, help="""Where to store the final ONNX file.""")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="""cpu"""):
    '''simple docstring'''
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    '''simple docstring'''
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = """My friends are cool but they eat too many carbs."""
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="""pt""").to(model.device)
        summary_ids = model.generate(
            inputs["""input_ids"""], attention_mask=inputs["""attention_mask"""], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )
        torch.onnx.export(
            bart_script_model, (
                inputs["""input_ids"""],
                inputs["""attention_mask"""],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""], output_names=["""output_ids"""], dynamic_axes={
                """input_ids""": {0: """batch""", 1: """seq"""},
                """output_ids""": {0: """batch""", 1: """seq_out"""},
            }, example_outputs=summary_ids, )
        logger.info("""Model exported to {}""".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("""Deduplicated and optimized model written to {}""".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                """input_ids""": inputs["""input_ids"""].cpu().numpy(),
                """attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
                """num_beams""": np.array(num_beams),
                """max_length""": np.array(max_length),
                """decoder_start_token_id""": np.array(model.config.decoder_start_token_id),
            }, )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("""Model outputs from torch and ONNX Runtime are similar.""")
        logger.info("""Success.""")
def main():
    '''simple docstring'''
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = """BART.onnx"""
    logger.info("""Exporting model to ONNX""")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 248
| 0
|
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
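# Added illustration (not part of the original file): with 100 V at 0 degrees
# and 5 A at 0 degrees the apparent power is purely real.
#   apparent_power(100, 5, 0, 0) -> (500+0j)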
| 370
|
from __future__ import annotations
def has_distinct_elements(collection: list[int]) -> bool:
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
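# Added illustration (not part of the original file): expected behaviour of the
# helper above.
#   has_distinct_elements([1, 2, 3]) -> True
#   has_distinct_elements([1, 2, 2]) -> False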
| 339
| 0
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree
    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )
    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
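# Added illustration (not part of the original file): summing a small
# hand-built tree; 10 + 5 + (-3) = 12.
if __name__ == "__main__":
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(sum(BinaryTreeNodeSum(root)))  # 12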
| 50
|
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    '''Rescale data to the [0, 1] range (min-max normalization).'''
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    '''Rescale data to zero mean and unit variance (z-score).'''
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
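# Added illustration (not part of the original file): expected values for a
# small sample, matching the min-max formula rounded to 3 digits.
#   normalization([2, 7, 10, 20, 30, 50]) -> [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
#   standardization(...) returns the same data shifted/scaled to mean ~0, stdev ~1.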
| 85
| 0
|
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    """Replace the recorded expected line inside `class_name.test_name` in `file`."""
    _id = f'''{file}_{class_name}_{test_name}'''
    done_test[_id] += 1
    with open(file, '''r''') as f:
        lines = f.readlines()
    class_regex = f'''class {class_name}('''
    test_regex = f'''{4 * " "}def {test_name}('''
    line_begin_regex = f'''{8 * " "}{correct_line.split()[0]}'''
    another_line_begin_regex = f'''{16 * " "}{correct_line.split()[0]}'''
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f'''{spaces * " "}{correct_line}''')
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, '''w''') as f:
        for line in new_lines:
            f.write(line)
def main(correct_filename, fail_filename=None):
    """Apply every correction listed in `correct_filename` (one `file;class;test;line` per row)."""
    if fail_filename is not None:
        with open(fail_filename, '''r''') as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, '''r''') as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(''';''')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 272
|
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    """Count the ways to give every person exactly one distinct task."""
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
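# Added note (not part of the original file): for the task lists above there
# are 10 valid ways to give every person a distinct task, so the script
# prints 10.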
| 272
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''')
    parser.add_argument(
        '''--validation_file''', type=str, default=None, help='''A csv or a json file containing the validation data.''')
    parser.add_argument(
        '''--max_length''', type=int, default=5, help='''The maximum total input sequence length after tokenization.''', )
    parser.add_argument(
        '''--num_beams''', type=int, default=None, help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ), )
    parser.add_argument(
        '''--model_name_or_path''', type=str, help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=True, )
    parser.add_argument(
        '''--config_name''', type=str, default=None, help='''Pretrained config name or path if not the same as model_name''', )
    parser.add_argument(
        '''--device''', type=str, default='''cpu''', help='''Device where the model will be run''', )
    parser.add_argument('''--output_file_path''', type=str, default=None, help='''Where to store the final ONNX file.''')
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device='''cpu'''):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='''pt''').to(model.device)
        summary_ids = model.generate(
            inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )
        torch.onnx.export(
            bart_script_model, (
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''], output_names=['''output_ids'''], dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            }, example_outputs=summary_ids, )
        logger.info('''Model exported to {}'''.format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams),
                '''max_length''': np.array(max_length),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id),
            }, )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1E-3, atol=1E-3)
        logger.info('''Model outputs from torch and ONNX Runtime are similar.''')
        logger.info('''Success.''')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''')
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = '''BART.onnx'''
    logger.info('''Exporting model to ONNX''')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 263
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )  # True assumed; this flag's value is not recoverable from the obfuscated source
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='''gelu''', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''a cat and a frog''',
            '''token_indices''': [2, 5],
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''max_iter_to_alter''': 2,
            '''thresholds''': {0: 0.7},
        }
        return inputs
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = '''cpu'''
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Dict = self.get_dummy_inputs(A )
_UpperCAmelCase : Union[str, Any] = pipe(**A ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
_UpperCAmelCase : int = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
_UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().setUpClass()
        torch.use_deterministic_algorithms(True)
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
super().tearDownClass()
        torch.use_deterministic_algorithms(False)
def __lowerCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = torch.manual_seed(5_1 )
_UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses'''
_UpperCAmelCase : int = [5, 7]
_UpperCAmelCase : Dict = pipe(
prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 263
| 1
|
def matching_min_vertex_cover(graph: dict) -> set:
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
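# A minimal usage sketch (an editor's addition, mirroring the commented
# example above): every edge of the graph must have at least one endpoint
# in the returned cover.
#
# example_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# cover = matching_min_vertex_cover(example_graph)
# assert all(u in cover or v in cover for u, v in get_edges(example_graph))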
| 333
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333
| 1
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class a_ (tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or """WarmUp""") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name, )
def __UpperCamelCase ( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _UpperCAmelCase ( _lowerCamelCase : float , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.0 , _lowerCamelCase : float = 0.9 , _lowerCamelCase : float = 0.999 , _lowerCamelCase : float = 1e-8 , _lowerCamelCase : Optional[float] = None , _lowerCamelCase : Optional[float] = None , _lowerCamelCase : float = 0.0 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : Optional[List[str]] = None , ) -> List[str]:
_lowerCAmelCase : Optional[Any] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_lowerCamelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_lowerCamelCase , )
if num_warmup_steps:
_lowerCAmelCase : Tuple = WarmUp(
initial_learning_rate=_lowerCamelCase , decay_schedule_fn=_lowerCamelCase , warmup_steps=_lowerCamelCase , )
if weight_decay_rate > 0.0:
_lowerCAmelCase : int = AdamWeightDecay(
learning_rate=_lowerCamelCase , weight_decay_rate=_lowerCamelCase , beta_a=_lowerCamelCase , beta_a=_lowerCamelCase , epsilon=_lowerCamelCase , clipnorm=_lowerCamelCase , global_clipnorm=_lowerCamelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=_lowerCamelCase , )
else:
_lowerCAmelCase : int = tf.keras.optimizers.Adam(
learning_rate=_lowerCamelCase , beta_a=_lowerCamelCase , beta_a=_lowerCamelCase , epsilon=_lowerCamelCase , clipnorm=_lowerCamelCase , global_clipnorm=_lowerCamelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class a_ (_a ):
def __init__( self , snake_case_ = 0.001 , snake_case_ = 0.9 , snake_case_ = 0.999 , snake_case_ = 1E-7 , snake_case_ = False , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "AdamWeightDecay" , **snake_case_ , ):
super().__init__(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
_lowerCAmelCase : List[str] = weight_decay_rate
_lowerCAmelCase : Union[str, Any] = include_in_weight_decay
_lowerCAmelCase : Tuple = exclude_from_weight_decay
@classmethod
def __UpperCamelCase ( cls , snake_case_ ):
_lowerCAmelCase : Dict = {"""WarmUp""": WarmUp}
return super(snake_case_ , cls ).from_config(snake_case_ , custom_objects=snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
super(snake_case_ , self )._prepare_local(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Tuple = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None , **snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase : str = list(zip(*snake_case_ ) )
return super(snake_case_ , self ).apply_gradients(zip(snake_case_ , snake_case_ ) , name=snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
_lowerCAmelCase : str = apply_state or {}
_lowerCAmelCase : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
_lowerCAmelCase : Tuple = self._fallback_apply_state(snake_case_ , snake_case_ )
_lowerCAmelCase : str = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_=None ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , snake_case_ )
_lowerCAmelCase : List[str] = self._decay_weights_op(snake_case_ , snake_case_ , snake_case_ )
with tf.control_dependencies([decay] ):
return super(snake_case_ , self )._resource_apply_dense(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
_lowerCAmelCase , _lowerCAmelCase : Any = self._get_lr(var.device , var.dtype.base_dtype , snake_case_ )
_lowerCAmelCase : int = self._decay_weights_op(snake_case_ , snake_case_ , snake_case_ )
with tf.control_dependencies([decay] ):
return super(snake_case_ , self )._resource_apply_sparse(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def __UpperCamelCase ( self , snake_case_ ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(snake_case_ , snake_case_ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(snake_case_ , snake_case_ ) is not None:
return False
return True
class a_ (_a ):
def __init__( self ):
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Dict = None
@property
def __UpperCamelCase ( self ):
if self._accum_steps is None:
_lowerCAmelCase : Dict = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=snake_case_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def __UpperCamelCase ( self ):
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , snake_case_ ):
if not self._gradients:
_lowerCAmelCase : Optional[int] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(snake_case_ ) , trainable=snake_case_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(snake_case_ ) != len(self._gradients ):
raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(snake_case_ )}' )
for accum_gradient, gradient in zip(self._gradients , snake_case_ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(snake_case_ )
self._accum_steps.assign_add(1 )
def __UpperCamelCase ( self ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(snake_case_ ) )
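# Usage sketch (an editor's addition): in the original transformers source
# these definitions are `WarmUp`, `create_optimizer`, `AdamWeightDecay` and
# `GradientAccumulator`. Assuming those names, a typical fine-tuning setup is:
#
# optimizer, lr_schedule = create_optimizer(
#     init_lr=2e-5,            # peak learning rate reached after warmup
#     num_train_steps=10_000,
#     num_warmup_steps=1_000,
#     weight_decay_rate=0.01,  # > 0.0 selects the AdamWeightDecay path above
# )
# model.compile(optimizer=optimizer, loss=loss_fn)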
| 309
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = """src/diffusers"""
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    # Let's split the code into lines and move to start_prompt if one is given.
    index = 0
    lines = code.split("""\n""")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["""\n""".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + """ """):
                current_block.append(lines[index])
                blocks.append("""\n""".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("""\n""".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("""\n""".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("""\n""".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    # Wrap the key function so leading underscores are ignored when sorting.
    def _inner(x):
        return key(x).lower().replace("""_""", """""")
    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace("""\"""", """""") for part in imports.split(""",""")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("""\n""")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == """[""" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace("""\"""", """""") for part in lines[1].split(""",""")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + """, """.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    with open(file, """r""") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="""_import_structure = {""", end_prompt="""if TYPE_CHECKING:""")
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("""\n""")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = """\n""".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, """w""") as f:
                f.write("""\n""".join(main_blocks))
def sort_imports_in_all_inits(check_only: bool = True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, """__init__.py"""), check_only=check_only)
            if result:
                failures = [os.path.join(root, """__init__.py""")]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
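# Usage note (an editor's addition): run from the repository root. With the
# --check_only flag defined above, the script only reports unsorted
# `_import_structure` blocks; without it, offending `__init__.py` files are
# rewritten in place. The path below is an assumption based on the matching
# script in the diffusers repository:
#
#   python utils/custom_init_isort.py --check_only
#   python utils/custom_init_isort.py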
| 309
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_CAUSAL_LM_MAPPING
lowercase__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""")
# Using `do_sample=False` to force deterministic output
lowercase_ = text_generator("""This is a test""" , do_sample=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
lowercase_ = text_generator(["""This is a test""", """This is a second test"""])
self.assertEqual(
lowerCAmelCase_ , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
lowercase_ = text_generator("""This is a test""" , do_sample=lowerCAmelCase_ , num_return_sequences=2 , return_tensors=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
] , )
lowercase_ = text_generator.model.config.eos_token_id
lowercase_ = """<pad>"""
lowercase_ = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase_ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase_ , )
self.assertEqual(
lowerCAmelCase_ , [
[
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
],
[
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
{"""generated_token_ids""": ANY(lowerCAmelCase_)},
],
] , )
@require_tf
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""")
# Using `do_sample=False` to force deterministic output
lowercase_ = text_generator("""This is a test""" , do_sample=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
lowercase_ = text_generator(["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
lowercase_ = TextGenerationPipeline(model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_)
return text_generator, ["This is a test", "Another test"]
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = """Hello I believe in"""
lowercase_ = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""")
lowercase_ = text_generator(lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
lowercase_ = text_generator(lowerCAmelCase_ , stop_sequence=""" fe""")
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": """Hello I believe in fe"""}])
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any):
"""simple docstring"""
lowercase_ = text_generator.model
lowercase_ = text_generator.tokenizer
lowercase_ = text_generator("""This is a test""")
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": ANY(lowerCAmelCase_)}])
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test"""))
lowercase_ = text_generator("""This is a test""" , return_full_text=lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": ANY(lowerCAmelCase_)}])
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""])
lowercase_ = pipeline(task="""text-generation""" , model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , return_full_text=lowerCAmelCase_)
lowercase_ = text_generator("""This is a test""")
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": ANY(lowerCAmelCase_)}])
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""])
lowercase_ = text_generator("""This is a test""" , return_full_text=lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": ANY(lowerCAmelCase_)}])
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test"""))
lowercase_ = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
[{"""generated_text""": ANY(lowerCAmelCase_)}, {"""generated_text""": ANY(lowerCAmelCase_)}],
[{"""generated_text""": ANY(lowerCAmelCase_)}, {"""generated_text""": ANY(lowerCAmelCase_)}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowercase_ = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase_)
self.assertEqual(
lowerCAmelCase_ , [
[{"""generated_text""": ANY(lowerCAmelCase_)}, {"""generated_text""": ANY(lowerCAmelCase_)}],
[{"""generated_text""": ANY(lowerCAmelCase_)}, {"""generated_text""": ANY(lowerCAmelCase_)}],
] , )
with self.assertRaises(lowerCAmelCase_):
lowercase_ = text_generator("""test""" , return_full_text=lowerCAmelCase_ , return_text=lowerCAmelCase_)
with self.assertRaises(lowerCAmelCase_):
lowercase_ = text_generator("""test""" , return_full_text=lowerCAmelCase_ , return_tensors=lowerCAmelCase_)
with self.assertRaises(lowerCAmelCase_):
lowercase_ = text_generator("""test""" , return_text=lowerCAmelCase_ , return_tensors=lowerCAmelCase_)
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowercase_ = text_generator("""""")
self.assertEqual(lowerCAmelCase_ , [{"""generated_text""": ANY(lowerCAmelCase_)}])
else:
with self.assertRaises((ValueError, AssertionError)):
lowercase_ = text_generator("""""")
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowercase_ = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
text_generator("""This is a test""" * 5_0_0 , max_new_tokens=2_0)
lowercase_ = text_generator("""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=2_0)
# Hole strategy cannot work
with self.assertRaises(lowerCAmelCase_):
text_generator(
"""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
import torch
# Classic `model_kwargs`
lowercase_ = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
lowercase_ = pipe("""This is a test""")
self.assertEqual(
lowerCAmelCase_ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowercase_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa)
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
lowercase_ = pipe("""This is a test""")
self.assertEqual(
lowerCAmelCase_ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowercase_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""")
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa)
lowercase_ = pipe("""This is a test""")
self.assertEqual(
lowerCAmelCase_ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
import torch
lowercase_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa)
pipe("""This is a test""")
@require_torch
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self : str):
"""simple docstring"""
import torch
lowercase_ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa)
pipe("""This is a test""" , do_sample=lowerCAmelCase_ , top_p=0.5)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = """Hello world"""
lowercase_ = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""")
if text_generator.model.framework == "tf":
lowercase_ = logging.get_logger("""transformers.generation.tf_utils""")
else:
lowercase_ = logging.get_logger("""transformers.generation.utils""")
        lowercase_ = """Both `max_new_tokens`""" # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(lowerCAmelCase_) as cl:
lowercase_ = text_generator(lowerCAmelCase_ , max_length=1_0 , max_new_tokens=1)
self.assertIn(lowerCAmelCase_ , cl.out)
# The user only sets one -> no warning
with CaptureLogger(lowerCAmelCase_) as cl:
lowercase_ = text_generator(lowerCAmelCase_ , max_new_tokens=1)
self.assertNotIn(lowerCAmelCase_ , cl.out)
with CaptureLogger(lowerCAmelCase_) as cl:
lowercase_ = text_generator(lowerCAmelCase_ , max_length=1_0)
self.assertNotIn(lowerCAmelCase_ , cl.out)
| 313
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BarthezTokenizer
lowercase__ = BarthezTokenizerFast
lowercase__ = True
lowercase__ = True
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
super().setUp()
lowercase_ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""")
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase_)
lowercase_ = tokenizer
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_) , lowerCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_) , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(lowerCAmelCase_) , 1_0_1_1_2_2)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)
@require_torch
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowercase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
lowercase_ = self.tokenizer(
lowerCAmelCase_ , max_length=len(lowerCAmelCase_) , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""")
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
lowercase_ = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(lowerCAmelCase_)
lowercase_ = rust_tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(lowerCAmelCase_)
lowercase_ = rust_tokenizer.encode(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
lowercase_ = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase_ , )
| 313
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Tuple = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : List[str] = """sew-d"""
def __init__( self : Optional[Any] , snake_case_ : Tuple=3_2 , snake_case_ : Optional[Any]=7_6_8 , snake_case_ : Tuple=1_2 , snake_case_ : Union[str, Any]=1_2 , snake_case_ : Tuple=3_0_7_2 , snake_case_ : Tuple=2 , snake_case_ : int=5_1_2 , snake_case_ : Optional[int]=2_5_6 , snake_case_ : Union[str, Any]=True , snake_case_ : Any=True , snake_case_ : str=("p2c", "c2p") , snake_case_ : Dict="layer_norm" , snake_case_ : str="gelu_python" , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : Tuple=0.1 , snake_case_ : Any=0.0 , snake_case_ : Tuple=0.1 , snake_case_ : Union[str, Any]=0.0_2 , snake_case_ : str=1e-7 , snake_case_ : Optional[Any]=1e-5 , snake_case_ : Optional[Any]="group" , snake_case_ : Tuple="gelu" , snake_case_ : Tuple=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_ : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case_ : Dict=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case_ : int=False , snake_case_ : Union[str, Any]=1_2_8 , snake_case_ : int=1_6 , snake_case_ : Any=True , snake_case_ : Tuple=0.0_5 , snake_case_ : Tuple=1_0 , snake_case_ : Dict=2 , snake_case_ : Tuple=0.0 , snake_case_ : List[Any]=1_0 , snake_case_ : Union[str, Any]=0 , snake_case_ : Any="mean" , snake_case_ : Optional[Any]=False , snake_case_ : Any=False , snake_case_ : Tuple=2_5_6 , snake_case_ : int=0 , snake_case_ : Optional[Any]=1 , snake_case_ : List[str]=2 , **snake_case_ : List[str] , ):
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_norm
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(snake_case_ )
_UpperCAmelCase = list(snake_case_ )
_UpperCAmelCase = list(snake_case_ )
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = len(self.conv_dim )
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = squeeze_factor
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = position_buckets
_UpperCAmelCase = share_att_key
_UpperCAmelCase = relative_attention
_UpperCAmelCase = norm_rel_ebd
_UpperCAmelCase = list(snake_case_ )
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = feature_layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = apply_spec_augment
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
_UpperCAmelCase = mask_feature_min_masks
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# sequence classification
_UpperCAmelCase = use_weighted_layer_sum
_UpperCAmelCase = classifier_proj_size
@property
def lowercase ( self : Any ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
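# Usage sketch (an editor's addition): in transformers this class is
# `SEWDConfig` and the property above is `inputs_to_logits_ratio`. Assuming
# those names, it gives the ratio of input samples to output frames, i.e. the
# product of `conv_stride`:
#
# config = SEWDConfig()            # all defaults from __init__
# config.inputs_to_logits_ratio    # 5 * 2**6 = 320 for the default strides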
| 22
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    '''simple docstring'''
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class A_ ( lowerCAmelCase_ ):
def __init__( self : Optional[Any] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self : Any , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 1_0_0 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ):
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(snake_case_ , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}' )
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = preprocess(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_UpperCAmelCase = next(self.unet.parameters() ).dtype
_UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
_UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(snake_case_ , device=self.device )
_UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for t in self.progress_bar(snake_case_ ):
# concat latents and low resolution image in the channel dimension.
_UpperCAmelCase = torch.cat([latents, image] , dim=1 )
_UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
# decode the image latents with the VQVAE
_UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample
_UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
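# Usage sketch (an editor's addition): this pipeline corresponds to diffusers'
# `LDMSuperResolutionPipeline`. Assuming that name and the public
# CompVis/ldm-super-resolution-4x-openimages checkpoint, 4x upscaling of a
# low-resolution PIL image looks roughly like:
#
# pipeline = LDMSuperResolutionPipeline.from_pretrained(
#     "CompVis/ldm-super-resolution-4x-openimages"
# )
# upscaled = pipeline(low_res_image, num_inference_steps=100, eta=1.0).images[0]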
| 22
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class a ( unittest.TestCase ):
def __init__( self :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Tuple=7 ,__lowercase :Optional[Any]=3 ,__lowercase :Dict=3_0 ,__lowercase :Union[str, Any]=4_0_0 ,__lowercase :Optional[int]=True ,__lowercase :int=None ,__lowercase :int=0.9 ,__lowercase :Optional[int]=None ,__lowercase :Dict=True ,__lowercase :str=[0.5, 0.5, 0.5] ,__lowercase :str=[0.5, 0.5, 0.5] ,):
snake_case__ : List[Any] = size if size is not None else {'''shortest_edge''': 3_0}
snake_case__ : Any = crop_size if crop_size is not None else {'''height''': 3_0, '''width''': 3_0}
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Tuple = num_channels
snake_case__ : List[Any] = min_resolution
snake_case__ : int = max_resolution
snake_case__ : str = do_resize_and_center_crop
snake_case__ : Dict = size
snake_case__ : Union[str, Any] = crop_pct
snake_case__ : List[str] = crop_size
snake_case__ : Optional[Any] = do_normalize
snake_case__ : Tuple = image_mean
snake_case__ : List[str] = image_std
def __lowerCamelCase ( self :Any ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Any = PoolFormerImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Tuple = PoolFormerImageProcessingTester(self )
@property
def __lowerCamelCase ( self :Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(__lowercase ,'''size''' ) )
self.assertTrue(hasattr(__lowercase ,'''crop_pct''' ) )
self.assertTrue(hasattr(__lowercase ,'''do_normalize''' ) )
self.assertTrue(hasattr(__lowercase ,'''image_mean''' ) )
self.assertTrue(hasattr(__lowercase ,'''image_std''' ) )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 3_0} )
self.assertEqual(image_processor.crop_size ,{'''height''': 3_0, '''width''': 3_0} )
snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size ,{'''height''': 8_4, '''width''': 8_4} )
def __lowerCamelCase ( self :int ):
pass
def __lowerCamelCase ( self :Optional[Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
snake_case__ : Union[str, Any] = image_processing(__lowercase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def __lowerCamelCase ( self :List[str] ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase ,numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,np.ndarray )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
snake_case__ : str = image_processing(__lowercase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def __lowerCamelCase ( self :Optional[Any] ):
# Initialize image_processing
snake_case__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase ,torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,torch.Tensor )
# Test not batched input
snake_case__ : Dict = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
snake_case__ : str = image_processing(__lowercase ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
| 357
|
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the shortest repunit divisible by `divisor` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1000000) -> int:
    """Return the least n for which the shortest repunit divisible by n has more than `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 44
| 0
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class _lowercase ( lowercase_ ):
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :Union[str, "sqlalchemy.sql.Selectable"] , lowerCAmelCase__ :Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCAmelCase__ :Optional[Features] = None , lowerCAmelCase__ :str = None , lowerCAmelCase__ :bool = False , **lowerCAmelCase__ :Union[str, Any] , ) -> Union[str, Any]:
super().__init__(features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ , **lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE : Optional[Any] = Sql(
cache_dir=lowerCAmelCase_ , features=lowerCAmelCase_ , sql=lowerCAmelCase_ , con=lowerCAmelCase_ , **lowerCAmelCase_ , )
def __magic_name__( self :str ) -> Any:
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase_ , download_mode=lowerCAmelCase_ , verification_mode=lowerCAmelCase_ , base_path=lowerCAmelCase_ , )
# Build dataset for splits
__SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
split='''train''' , verification_mode=lowerCAmelCase_ , in_memory=self.keep_in_memory )
return dataset
class _lowercase :
'''simple docstring'''
def __init__( self :Dict , lowerCAmelCase__ :Dataset , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[int] = None , **lowerCAmelCase__ :List[Any] , ) -> Tuple:
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
__SCREAMING_SNAKE_CASE : List[Any] = dataset
__SCREAMING_SNAKE_CASE : Dict = name
__SCREAMING_SNAKE_CASE : List[str] = con
__SCREAMING_SNAKE_CASE : Tuple = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__SCREAMING_SNAKE_CASE : Any = num_proc
__SCREAMING_SNAKE_CASE : Optional[Any] = to_sql_kwargs
def __magic_name__( self :Any ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = self.to_sql_kwargs.pop('''sql''' , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE : Dict = self.to_sql_kwargs.pop('''index''' , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE : Tuple = self._write(index=lowerCAmelCase_ , **self.to_sql_kwargs )
return written
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :int ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = args
__SCREAMING_SNAKE_CASE : Optional[Any] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
__SCREAMING_SNAKE_CASE : Optional[int] = query_table(
table=self.dataset.data , key=slice(lowerCAmelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__SCREAMING_SNAKE_CASE : Dict = batch.to_pandas()
__SCREAMING_SNAKE_CASE : Optional[Any] = df.to_sql(self.name , self.con , index=lowerCAmelCase_ , **lowerCAmelCase_ )
return num_rows or len(lowerCAmelCase_ )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Optional[int] , **lowerCAmelCase__ :str ) -> int:
__SCREAMING_SNAKE_CASE : Any = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__SCREAMING_SNAKE_CASE : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , lowerCAmelCase_ , lowerCAmelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
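# A minimal usage sketch (an assumption, not part of the original file): these
# two classes back `Dataset.from_sql` and `Dataset.to_sql` in the `datasets`
# library, so a round trip through a SQLite database exercises both paths.
from datasets import Dataset
ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
ds.to_sql("my_table", "sqlite:///example.db")  # writer path: batched pandas.DataFrame.to_sql
ds_back = Dataset.from_sql("my_table", "sqlite:///example.db")  # reader path: the Sql builder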
| 9
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int = None , lowerCAmelCase_ : int = None ) -> str:
'''simple docstring'''
super().__init__()
A__ : Optional[Any] =pad_token_id
A__ : int =max_length
A__ : Optional[int] =vocab
A__ : Any =merges
A__ : Optional[Any] =BytePairTokenizer(lowerCAmelCase_ , lowerCAmelCase_ , sequence_length=lowerCAmelCase_ )
@classmethod
def lowercase__ ( cls : Optional[int] , lowerCAmelCase_ : GPTaTokenizer , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Dict ) -> List[Any]:
'''simple docstring'''
A__ : Any =[""" """.join(lowerCAmelCase_ ) for m in tokenizer.bpe_ranks.keys()]
A__ : List[str] =tokenizer.get_vocab()
return cls(lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def lowercase__ ( cls : Tuple , lowerCAmelCase_ : Union[str, os.PathLike] , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
A__ : Any =GPTaTokenizer.from_pretrained(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
return cls.from_tokenizer(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def lowercase__ ( cls : str , lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return cls(**lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowercase__ ( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int = None ) -> Tuple:
'''simple docstring'''
A__ : Optional[int] =self.tf_tokenizer(lowerCAmelCase_ )
A__ : List[Any] =tf.ones_like(lowerCAmelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
A__ : Union[str, Any] =max_length if max_length is not None else self.max_length
if max_length is not None:
A__ , A__ : Any =pad_model_inputs(
lowerCAmelCase_ , max_seq_length=lowerCAmelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 134
| 0
|
'''simple docstring'''
import base64
def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Ascii85."""
    return base64.a85encode(string.encode("utf-8"))
def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back into a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
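# A round-trip check for the Ascii85 helpers above (names as reconstructed):
assert base85_decode(base85_encode("some text")) == "some text"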
| 156
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Dict = {'''vocab_file''': '''spiece.model'''}
__SCREAMING_SNAKE_CASE :Any = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
__SCREAMING_SNAKE_CASE :int = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Any = VOCAB_FILES_NAMES
_lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , snake_case_ : Any , snake_case_ : Optional[Any]=False , snake_case_ : int=False , snake_case_ : Any=False , snake_case_ : Optional[Any]=None , snake_case_ : List[Any]=None , snake_case_ : Tuple=None , snake_case_ : Any=None , snake_case_ : Optional[Dict[str, Any]] = None , **snake_case_ : Any , ):
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCAmelCase = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_UpperCAmelCase = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_UpperCAmelCase = "<|endoftext|>" if eos_token is None else eos_token
_UpperCAmelCase = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_UpperCAmelCase = unk_token if pad_token is None else pad_token
_UpperCAmelCase = eos_token if bos_token is None else bos_token
else:
_UpperCAmelCase = "<pad>" if pad_token is None else pad_token
_UpperCAmelCase = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
# Used for whitespace normalization in input texts
        # fmt: off
_UpperCAmelCase = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_UpperCAmelCase = re.compile(
f'[{"".join(map(snake_case_ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Optional[Any] ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self : Any , snake_case_ : Union[str, Any] ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowercase ( self : Dict ):
return len(self.sp_model )
def lowercase ( self : Optional[Any] , snake_case_ : str ):
_UpperCAmelCase = self.non_printing_characters_re.sub("" , snake_case_ )
# Normalize whitespaces
_UpperCAmelCase = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_UpperCAmelCase = unicodedata.normalize("NFC" , snake_case_ )
return text
def lowercase ( self : List[str] , snake_case_ : str , **snake_case_ : List[str] ):
_UpperCAmelCase = self.preprocess_text(snake_case_ )
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def lowercase ( self : str , snake_case_ : str ):
return self.sp_model.PieceToId(snake_case_ )
def lowercase ( self : int , snake_case_ : int ):
return self.sp_model.IdToPiece(snake_case_ )
@staticmethod
def lowercase ( snake_case_ : str ):
return out_string
def lowercase ( self : Any , snake_case_ : List[str] ):
_UpperCAmelCase = []
_UpperCAmelCase = ""
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(snake_case_ )
_UpperCAmelCase = False
out_string += self.sp_model.decode(snake_case_ )
return out_string
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase ( self : Optional[int] , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
def lowercase ( self : Any , snake_case_ : Union[str, List[str]] , snake_case_ : Union[str, bool] = False ):
if isinstance(snake_case_ , snake_case_ ):
_UpperCAmelCase = self.preprocess_text(snake_case_ )
_UpperCAmelCase = self.sp_model.encode(snake_case_ )
else:
_UpperCAmelCase = [self.preprocess_text(snake_case_ ) for t in text]
_UpperCAmelCase = self.sp_model.encode(snake_case_ )
if return_tensors is True or return_tensors == "pt":
_UpperCAmelCase = torch.tensor(snake_case_ )
return token_ids
def lowercase ( self : Optional[Any] , snake_case_ : Union[int, List[int]] ):
return self.sp_model.decode(snake_case_ )
def lowercase ( self : List[str] , snake_case_ : "Conversation" ):
_UpperCAmelCase = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_UpperCAmelCase = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(snake_case_ ) + f'{self.bos_token}Bot:'
)
return self.encode(text=snake_case_ )
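    # Hedged usage sketch: the class above is `GPTSw3Tokenizer` (an assumption
    # based on the pretrained map), and `encode_fast` / `decode_fast` are the
    # two convenience methods defined near the end of it.
    #
    #   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    #   ids = tok.encode_fast("Det här är en text")  # List[int], or a tensor with return_tensors="pt"
    #   text = tok.decode_fast(ids)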
| 156
| 1
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Return the area of the grid whose rectangle count is closest to `target`."""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(f'''{solution() = }''')
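# The search above leans on a closed form worth spelling out (a hedged
# addition): an a-by-b grid contains T(a) * T(b) axis-aligned rectangles,
# where T(k) = k * (k + 1) / 2 is the k-th triangle number.
def rectangle_count(a: int, b: int) -> int:
    return a * (a + 1) * b * (b + 1) // 4
assert rectangle_count(3, 2) == 18  # the worked example given in Project Euler 85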
| 90
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
__lowerCamelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , x.transpose() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , transpose(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , transpose(lowerCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , transpose(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , transpose(lowerCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , np.asarray(transpose(lowerCamelCase__ ) ) ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , np.asarray(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) ) ) )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , np.reshape(lowerCamelCase__ , (4, 3) ) ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , np.reshape(lowerCamelCase__ , (12, 5) ) ) )
@require_torch
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , reshape(lowerCamelCase__ , (4, 3) ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , reshape(lowerCamelCase__ , (12, 5) ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , reshape(lowerCamelCase__ , (4, 3) ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , reshape(lowerCamelCase__ , (12, 5) ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , np.asarray(reshape(lowerCamelCase__ , (4, 3) ) ) ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , np.asarray(reshape(lowerCamelCase__ , (12, 5) ) ) ) )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , np.squeeze(lowerCamelCase__ ) ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , np.squeeze(lowerCamelCase__ , axis=2 ) ) )
@require_torch
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , squeeze(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , squeeze(lowerCamelCase__ , axis=2 ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , squeeze(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , squeeze(lowerCamelCase__ , axis=2 ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , np.asarray(squeeze(lowerCamelCase__ ) ) ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , np.asarray(squeeze(lowerCamelCase__ , axis=2 ) ) ) )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , np.expand_dims(lowerCamelCase__ , axis=1 ) ) )
@require_torch
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , expand_dims(lowerCamelCase__ , axis=1 ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , expand_dims(lowerCamelCase__ , axis=1 ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , np.asarray(expand_dims(lowerCamelCase__ , axis=1 ) ) ) )
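# What these tests pin down, in one hedged example: `transpose`, `reshape`,
# `squeeze` and `expand_dims` from `transformers.utils` dispatch on the
# input's framework (numpy, torch, tf or jax) while matching numpy semantics.
x = np.random.randn(3, 4)
assert transpose(x).shape == (4, 3)
assert reshape(x, (12,)).shape == (12,)
assert expand_dims(x, axis=0).shape == (1, 3, 4)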
| 90
| 1
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = eval_examples
UpperCamelCase : Optional[Any] = post_process_function
def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "eval" ):
"""simple docstring"""
UpperCamelCase : int = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCamelCase : int = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCamelCase : Any = self.compute_metrics
UpperCamelCase : List[Any] = None
UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCamelCase : Dict = time.time()
try:
UpperCamelCase : str = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
UpperCamelCase : Union[str, Any] = compute_metrics
UpperCamelCase : Any = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions )
UpperCamelCase : Optional[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
UpperCamelCase : Dict = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
else:
UpperCamelCase : List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCamelCase : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE )
return metrics
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ):
"""simple docstring"""
UpperCamelCase : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCamelCase : Union[str, Any] = self.compute_metrics
UpperCamelCase : Tuple = None
UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCamelCase : Optional[int] = time.time()
try:
UpperCamelCase : int = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
UpperCamelCase : int = compute_metrics
UpperCamelCase : Dict = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , '''predict''' )
UpperCamelCase : Union[str, Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
UpperCamelCase : Any = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
| 315
|
import qiskit
def half_adder(bit0: int, bit1: int) -> dict:
    """Simulate a two-bit quantum half adder and return the measurement counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
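# Hedged check: the circuit is deterministic, so for inputs (1, 1) every shot
# yields sum = 0 and carry = 1; Qiskit reports classical bits most-significant
# first (c1 c0), so the histogram collapses to a single bin:
assert half_adder(1, 1) == {"10": 1000}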
| 315
| 1
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowercase : Optional[Any] = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :List[Any] , a :Path , a :Union[str, None] = None , a :Union[List[str], None] = None , a :Union[str, List[str], None] = None , a :bool = True , ) -> Union[str, Any]:
__UpperCamelCase : List[Any] = [file for file in os.listdir(a ) if os.path.isfile(os.path.join(a , a ) )]
if identifier is not None:
__UpperCamelCase : List[str] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a ):
for n_ in n_identifier:
__UpperCamelCase : Any = [file for file in files if n_ not in file]
else:
__UpperCamelCase : Any = [file for file in files if n_identifier not in file]
__UpperCamelCase : Any = ignore_files or []
ignore_files.append("__init__.py" )
__UpperCamelCase : Dict = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a )
if only_modules:
__UpperCamelCase : Optional[int] = file.split("." )[0]
try:
__UpperCamelCase : Optional[int] = getattr(a , a )
__UpperCamelCase : Tuple = doctest.DocTestSuite(a )
__UpperCamelCase : Union[str, Any] = unittest.TextTestRunner().run(a )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'{module_identifier} is not a module.' )
else:
                __UpperCamelCase : int = doctest.testfile(str(Path(".." ) / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def _lowerCamelCase ( self :Dict ) -> str:
__UpperCamelCase : int = Path("src/transformers" )
__UpperCamelCase : int = "modeling"
__UpperCamelCase : Optional[int] = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a )
def _lowerCamelCase ( self :Optional[Any] ) -> Tuple:
__UpperCamelCase : str = Path("src/transformers" )
__UpperCamelCase : List[Any] = "tokenization"
self.analyze_directory(a , identifier=a )
def _lowerCamelCase ( self :str ) -> List[str]:
__UpperCamelCase : Optional[int] = Path("src/transformers" )
__UpperCamelCase : Optional[int] = "configuration"
self.analyze_directory(a , identifier=a )
def _lowerCamelCase ( self :Optional[int] ) -> Optional[int]:
__UpperCamelCase : Tuple = Path("src/transformers" )
__UpperCamelCase : Dict = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
__UpperCamelCase : Dict = Path("docs/source" )
__UpperCamelCase : List[str] = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a )
| 232
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Any = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = 'pegasus'
_A = ['past_key_values']
_A = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self :Dict , a :Dict=5_0_2_6_5 , a :Dict=1_0_2_4 , a :Union[str, Any]=1_2 , a :Any=4_0_9_6 , a :str=1_6 , a :str=1_2 , a :Optional[Any]=4_0_9_6 , a :int=1_6 , a :Optional[int]=0.0 , a :Optional[int]=0.0 , a :List[Any]=True , a :Union[str, Any]=True , a :int="gelu" , a :Dict=1_0_2_4 , a :List[Any]=0.1 , a :List[str]=0.0 , a :List[Any]=0.0 , a :str=0.02 , a :int=0 , a :Any=False , a :Dict=0 , a :int=1 , a :Optional[Any]=1 , **a :Optional[int] , ) -> str:
__UpperCamelCase : List[Any] = vocab_size
__UpperCamelCase : Union[str, Any] = max_position_embeddings
__UpperCamelCase : str = d_model
__UpperCamelCase : Dict = encoder_ffn_dim
__UpperCamelCase : int = encoder_layers
__UpperCamelCase : int = encoder_attention_heads
__UpperCamelCase : List[Any] = decoder_ffn_dim
__UpperCamelCase : List[Any] = decoder_layers
__UpperCamelCase : List[str] = decoder_attention_heads
__UpperCamelCase : str = dropout
__UpperCamelCase : Union[str, Any] = attention_dropout
__UpperCamelCase : List[str] = activation_dropout
__UpperCamelCase : Optional[Any] = activation_function
__UpperCamelCase : Tuple = init_std
__UpperCamelCase : Optional[int] = encoder_layerdrop
__UpperCamelCase : Union[str, Any] = decoder_layerdrop
__UpperCamelCase : Optional[Any] = use_cache
__UpperCamelCase : Union[str, Any] = encoder_layers
__UpperCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=a , eos_token_id=a , is_encoder_decoder=a , decoder_start_token_id=a , forced_eos_token_id=a , **a , )
@property
def _lowerCamelCase ( self :Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self :Optional[Any] ) -> int:
return self.d_model
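# A hedged instantiation example (the class above corresponds to
# `PegasusConfig`): the attribute map plus the two properties expose
# `num_attention_heads` and `hidden_size` as aliases of the encoder settings.
from transformers import PegasusConfig
config = PegasusConfig()
assert config.num_attention_heads == config.encoder_attention_heads == 16
assert config.hidden_size == config.d_model == 1024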
| 232
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase: List[str] = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__lowercase: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase__(__snake_case ) -> List[Any]:
'''simple docstring'''
    lowerCamelCase__ = filter(lambda p : p.requires_grad ,model.parameters() )
lowerCamelCase__ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_a = logging.getLogger(__name__)
def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[str]:
'''simple docstring'''
if metric == "rouge2":
lowerCamelCase__ = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
lowerCamelCase__ = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
lowerCamelCase__ = '''{val_avg_em:.4f}-{step_count}'''
else:
raise NotImplementedError(
F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
''' function.''' )
lowerCamelCase__ = ModelCheckpoint(
dirpath=__snake_case ,filename=__snake_case ,monitor=F'val_{metric}' ,mode='''max''' ,save_top_k=3 ,every_n_epochs=1 ,)
return checkpoint_callback
def lowerCAmelCase__(__snake_case ,__snake_case ) -> str:
'''simple docstring'''
return EarlyStopping(
monitor=F'val_{metric}' ,mode='''min''' if '''loss''' in metric else '''max''' ,patience=__snake_case ,verbose=__snake_case ,)
class __A ( pl.Callback ):
'''simple docstring'''
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = {F'lr_group_{i}': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__lowerCAmelCase )
@rank_zero_only
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True ):
'''simple docstring'''
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
lowerCamelCase__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
lowerCamelCase__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowerCamelCase__ = od / '''test_results.txt'''
lowerCamelCase__ = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowerCamelCase__ = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
lowerCamelCase__ = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=__lowerCAmelCase )
generations_file.parent.mkdir(exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , '''a+''' ) as writer:
for key in sorted(__lowerCAmelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
lowerCamelCase__ = metrics[key]
if isinstance(__lowerCAmelCase , torch.Tensor ):
lowerCamelCase__ = val.item()
lowerCamelCase__ = F'{key}: {val:.6f}\n'
writer.write(__lowerCAmelCase )
if not save_generations:
return
if "preds" in metrics:
lowerCamelCase__ = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(__lowerCAmelCase )
@rank_zero_only
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
try:
lowerCamelCase__ = pl_module.model.model.num_parameters()
except AttributeError:
lowerCamelCase__ = pl_module.model.num_parameters()
lowerCamelCase__ = count_trainable_parameters(__lowerCAmelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__lowerCAmelCase , __lowerCAmelCase , '''test''' )
@rank_zero_only
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
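# Hedged wiring sketch (the obfuscated names above correspond to
# `get_checkpoint_callback`, `get_early_stopping_callback` and
# `Seq2SeqLoggingCallback` from the original seq2seq example scripts):
#
#   checkpoint_cb = get_checkpoint_callback("outputs", "rouge2")
#   early_stop_cb = get_early_stopping_callback("rouge2", 3)
#   trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb])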
| 209
|
import numpy as np
from transformers import Pipeline
def softmax(outputs: np.ndarray) -> np.ndarray:
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class __A ( lowerCAmelCase ):
'''simple docstring'''
def __lowerCamelCase ( self , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = {}
if "second_text" in kwargs:
lowerCamelCase__ = kwargs['''second_text''']
return preprocess_kwargs, {}, {}
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None ):
'''simple docstring'''
return self.tokenizer(__lowerCAmelCase , text_pair=__lowerCAmelCase , return_tensors=self.framework )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.model(**__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = model_outputs.logits[0].numpy()
lowerCamelCase__ = softmax(__lowerCAmelCase )
lowerCamelCase__ = np.argmax(__lowerCAmelCase )
lowerCamelCase__ = self.model.config.idalabel[best_class]
lowerCamelCase__ = probabilities[best_class].item()
lowerCamelCase__ = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 209
| 1
|
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed `n`."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 369
|
'''simple docstring'''
from math import sqrt
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (
number >= 0
), "'number' must been an int and positive"
__lowerCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
__lowerCAmelCase = False
for divisor in range(2, int(round(sqrt(lowerCamelCase))) + 1):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__lowerCAmelCase = False
break
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'status' must been from type bool"
return status
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowerCAmelCase = list(range(2, n + 1))
__lowerCAmelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCamelCase)):
for j in range(i + 1, len(lowerCamelCase)):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowerCAmelCase = 0
# filters actual prime numbers.
__lowerCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2"
__lowerCAmelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2, n + 1):
if is_prime(lowerCamelCase):
ans.append(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and number >= 0, "'number' must been an int and >= 0"
__lowerCAmelCase = [] # this list will be returns of the function.
# potential prime number factors.
__lowerCAmelCase = 2
__lowerCAmelCase = number
if number == 0 or number == 1:
ans.append(lowerCamelCase)
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase):
while quotient != 1:
if is_prime(lowerCamelCase) and (quotient % factor == 0):
ans.append(lowerCamelCase)
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCAmelCase = 0
# prime factorization of 'number'
__lowerCAmelCase = prime_factorization(lowerCamelCase)
__lowerCAmelCase = max(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCAmelCase = 0
# prime factorization of 'number'
__lowerCAmelCase = prime_factorization(lowerCamelCase)
__lowerCAmelCase = min(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int"
    assert isinstance(number % 2 == 0, lowerCamelCase), "compare must been from type bool"
return number % 2 == 0
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int"
    assert isinstance(number % 2 != 0, lowerCamelCase), "compare must been from type bool"
return number % 2 != 0
def __magic_name__( lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase) and (number > 2) and is_even(lowerCamelCase)
), "'number' must been an int, even and > 2"
__lowerCAmelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowerCAmelCase = get_prime_numbers(lowerCamelCase)
__lowerCAmelCase = len(lowerCamelCase)
# run variable for while-loops.
__lowerCAmelCase = 0
__lowerCAmelCase = None
# exit variable. for break up the loops
__lowerCAmelCase = True
while i < len_pn and loop:
__lowerCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowerCAmelCase = False
ans.append(prime_numbers[i])
ans.append(prime_numbers[j])
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and (len(lowerCamelCase) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0])
and is_prime(ans[1])
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and isinstance(lowerCamelCase, lowerCamelCase)
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 0
while numbera != 0:
__lowerCAmelCase = numbera % numbera
__lowerCAmelCase = numbera
__lowerCAmelCase = rest
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and isinstance(lowerCamelCase, lowerCamelCase)
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowerCAmelCase = prime_factorization(lowerCamelCase)
__lowerCAmelCase = prime_factorization(lowerCamelCase)
elif numbera == 1 or numbera == 1:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = max(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
for _ in range(max(lowerCamelCase, lowerCamelCase)):
ans *= n
else:
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
for _ in range(lowerCamelCase):
ans *= n
done.append(lowerCamelCase)
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
for _ in range(lowerCamelCase):
ans *= n
done.append(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'number' must been a positive int"
__lowerCAmelCase = 0
__lowerCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase):
ans += 1
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase) and is_prime(
lowerCamelCase), "'ans' must been a prime number and from type int"
return ans
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
is_prime(lowerCamelCase) and is_prime(lowerCamelCase) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowerCAmelCase = p_number_a + 1 # jump to the next number
__lowerCAmelCase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase)
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase):
number += 1
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and ans[0] != p_number_a
and ans[len(lowerCamelCase) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 1), "'n' must been int and >= 1"
__lowerCAmelCase = [] # will be returned.
for divisor in range(1, n + 1):
if n % divisor == 0:
ans.append(lowerCamelCase)
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase) - 1] == n, "Error in function getDivisiors(...)"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (
number > 1
), "'number' must been an int and >= 1"
__lowerCAmelCase = get_divisors(lowerCamelCase)
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1]) == number
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and isinstance(lowerCamelCase, lowerCamelCase)
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowerCAmelCase = gcd(abs(lowerCamelCase), abs(lowerCamelCase))
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been a int and >= 0"
__lowerCAmelCase = 1 # this will be return.
for factor in range(1, n + 1):
ans *= factor
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been an int and >= 0"
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 1 # this will be return
for _ in range(n - 1):
__lowerCAmelCase = ans
ans += fiba
__lowerCAmelCase = tmp
return ans
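# Hedged smoke test, assuming the defs above carry the names their own call
# sites use (is_prime, prime_factorization, goldbach, gcd, kg_v -- "kgV" is
# German shorthand for the least common multiple):
#
#   assert is_prime(97) and not is_prime(91)       # 91 = 7 * 13
#   assert prime_factorization(84) == [2, 2, 3, 7]
#   assert goldbach(28) == [5, 23]                 # 28 = 5 + 23
#   assert gcd(24, 36) == 12 and kg_v(24, 36) == 72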
| 9
| 0
|
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = FlaxAutoencoderKL
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = 4
A = 3
A = (32, 32)
A = jax.random.PRNGKey(0 )
A = jax.random.uniform(A_ ,((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
A = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
A = self.dummy_input
return init_dict, inputs_dict
| 74
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
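# Illustrative invocation (script name and paths are placeholders):
#   python convert_mbart_original_checkpoint_to_pytorch.py /path/to/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned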
| 74
| 1
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Promote a scalar to a pair; leave iterables unchanged."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
pass
    def prepare_config_and_inputs(self):
pass
    def get_pretrained_model_and_inputs(self):
pass
    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
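        # e.g. a 224x224 image with 16x16 patches would give 14 * 14 = 196 patches and
        # hence seq_len == 197 (illustrative numbers, not the tiny config used here)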
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)
        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)
        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)
    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)
    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)
    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
@is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)
@slow
    def test_real_model_save_load_from_pretrained(self):
        model_a, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_a(**inputs)
        out_1 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname)
            model_a = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_a(**inputs)
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]))
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 352
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    """Builds default attention masks from the pad token when none are provided."""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
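# Illustrative (assuming pad_token_id == 1): for input_ids [[5, 6, 1]] the derived
# attention_mask is [[1, 1, 0]]; the decoder mask always keeps position 0, which
# holds the decoder start token.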
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )
                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 60
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
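    # Usage sketch (illustrative): once the lazy module is installed in sys.modules,
    # `from transformers.models.wavlm import WavLMModel` defers the heavy torch
    # import until the attribute is first accessed.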
| 19
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, do_normalize=True,
                 image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 220
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTests(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
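        # The assertion checks that the score reproduced from the Hub checkpoint matches
        # a previously recorded value to within 2e-4; `mtf_score` is simply the negated
        # reduced loss, i.e. an average log-likelihood-style score for the target text.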
| 114
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 114
| 1
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Returns the local cache path for a given Bark sub-model checkpoint."""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
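# For example, given REMOTE_MODEL_PATHS above, _get_ckpt_path("text", use_small=True)
# resolves to os.path.join(CACHE_DIR, "text.pt"), i.e. the small text checkpoint.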
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 43
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_consistent(self):
        pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_single_identical(self):
        pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def test_num_images_per_prompt(self):
        pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 296
| 0
|
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
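# Note: the function above picks two independent random indices per step, which is a
# naive shuffle rather than the classic Fisher-Yates. A minimal sketch of the classic,
# uniformly distributed variant:
def fisher_yates_shuffle_classic(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data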
if __name__ == "__main__":
__snake_case : List[Any] = [0, 1, 2, 3, 4, 5, 6, 7]
__snake_case : Optional[int] = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 371
|
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data to the [0, 1] range
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean and unit (sample) standard deviation
    return [round((x - mu) / (sigma), ndigits) for x in data]
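# Usage sketch:
#   normalization([1, 2, 3])    -> [0.0, 0.5, 1.0]
#   standardization([1, 2, 3])  -> [-1.0, 0.0, 1.0]  (statistics.stdev is the sample stdev)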
| 18
| 0
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
lowerCamelCase__ = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
lowerCamelCase__ = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def UpperCamelCase_ ( self : List[Any] , __lowercase : Any , __lowercase : Any , __lowercase : int = CHRF.CHAR_ORDER , __lowercase : int = CHRF.WORD_ORDER , __lowercase : int = CHRF.BETA , __lowercase : bool = False , __lowercase : bool = False , __lowercase : bool = False , ):
'''simple docstring'''
__a = len(references[0] )
if any(len(__lowercase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
__a = [[refs[i] for refs in references] for i in range(__lowercase )]
__a = CHRF(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
__a = sb_chrf.corpus_score(__lowercase , __lowercase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
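# A minimal illustration of the reference transposition performed in the
# compute method above (hypothetical strings): per-prediction reference lists
# are regrouped into per-slot lists, the layout sacrebleu's CHRF expects.
#   references  = [["ref A1", "ref A2"], ["ref B1", "ref B2"]]
#   transformed = [[refs[i] for refs in references] for i in range(2)]
#   # -> [["ref A1", "ref B1"], ["ref A2", "ref B2"]]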
| 302
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
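# The __init__ above follows the usual lazy-import layout: at import time only
# the `_import_structure` dict is built, and `_LazyModule` resolves attributes
# to submodule imports on first access. A stripped-down sketch of the idea
# (illustrative only, not the real transformers implementation):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # only called for attributes not found normally: import on demand
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)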
| 302
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCAmelCase : List[Any] = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
    LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
    LongT5EncoderModel,
    LongT5ForConditionalGeneration,
    LongT5Model,
    LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
    FlaxLongT5ForConditionalGeneration,
    FlaxLongT5Model,
    FlaxLongT5PreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 70
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Union[str, Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 70
| 1
|
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A =2
class _a :
def __init__( self : Any , *, # begin keyword-only arguments
lowercase : str="<s>" , lowercase : Optional[Any]="<pad>" , lowercase : str="</s>" , lowercase : str="<unk>" , lowercase : List[str]=None , ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = bos, unk, pad, eos
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = {}
UpperCAmelCase = self.add_symbol(lowercase )
UpperCAmelCase = self.add_symbol(lowercase )
UpperCAmelCase = self.add_symbol(lowercase )
UpperCAmelCase = self.add_symbol(lowercase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(lowercase )
UpperCAmelCase = len(self.symbols )
def __eq__( self : Optional[Any] , lowercase : str ):
'''simple docstring'''
return self.indices == other.indices
def __getitem__( self : Tuple , lowercase : Union[str, Any] ):
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : int ):
'''simple docstring'''
return len(self.symbols )
def __contains__( self : List[Any] , lowercase : List[str] ):
'''simple docstring'''
return sym in self.indices
@classmethod
def A ( cls : Optional[int] , lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase = cls()
d.add_from_file(lowercase )
return d
def A ( self : int , lowercase : Union[str, Any] , lowercase : str=1 , lowercase : List[Any]=False ):
'''simple docstring'''
if word in self.indices and not overwrite:
UpperCAmelCase = self.indices[word]
UpperCAmelCase = self.count[idx] + n
return idx
else:
UpperCAmelCase = len(self.symbols )
UpperCAmelCase = idx
self.symbols.append(lowercase )
self.count.append(lowercase )
return idx
def A ( self : Optional[int] , lowercase : int ):
'''simple docstring'''
return 0
def A ( self : Tuple , lowercase : Optional[int] ):
'''simple docstring'''
if isinstance(lowercase , lowercase ):
try:
with open(lowercase , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(lowercase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(lowercase ) )
return
UpperCAmelCase = f.readlines()
UpperCAmelCase = self._load_meta(lowercase )
for line in lines[indices_start_line:]:
try:
UpperCAmelCase , UpperCAmelCase = line.rstrip().rsplit(''' ''' , 1 )
if field == "#fairseq:overwrite":
UpperCAmelCase = True
UpperCAmelCase , UpperCAmelCase = line.rsplit(''' ''' , 1 )
else:
UpperCAmelCase = False
UpperCAmelCase = int(lowercase )
UpperCAmelCase = line
if word in self and not overwrite:
raise RuntimeError(
'''Duplicate word found when loading Dictionary: \'{}\'. '''
'''Duplicate words can overwrite earlier ones by adding the '''
'''#fairseq:overwrite flag at the end of the corresponding row '''
'''in the dictionary file. If using the Camembert model, please '''
'''download an updated copy of the model file.'''.format(lowercase ) )
self.add_symbol(lowercase , n=lowercase , overwrite=lowercase )
except ValueError:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )
def snake_case_ (_a : int ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
UpperCAmelCase = dict((re.sub(R'''@@$''' , '''''' , _a ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , _a ), v) for k, v in d.items() )
UpperCAmelCase = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[F"{k}</w>"]
UpperCAmelCase = d[k] # restore
return da
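# Worked example of the key rewrite above (hypothetical vocabulary): fairseq's
# continuation marker "@@" is dropped, "</w>" is appended to word-final tokens,
# and the special tokens are restored without the "</w>" suffix:
#   {"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0}
#   -> {"le": 5, "tt": 6, "er</w>": 7, "<s>": 0}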
def snake_case_ (_a : str , _a : str ):
# prep
if not os.path.exists(_a ):
raise ValueError(F"path {biogpt_checkpoint_path} does not exist!" )
os.makedirs(_a , exist_ok=_a )
print(F"Writing results to {pytorch_dump_folder_path}" )
# handle various types of models
UpperCAmelCase = os.path.join(_a , '''checkpoint.pt''' )
if not os.path.isfile(_a ):
raise ValueError(F"path to the file {checkpoint_file} does not exist!" )
UpperCAmelCase = torch.load(_a , map_location='''cpu''' )
UpperCAmelCase = chkpt['''cfg''']['''model''']
# dicts
UpperCAmelCase = os.path.join(_a , '''dict.txt''' )
if not os.path.isfile(_a ):
raise ValueError(F"path to the file {dict_file} does not exist!" )
UpperCAmelCase = Dictionary.load(_a )
UpperCAmelCase = rewrite_dict_keys(src_dict.indices )
UpperCAmelCase = len(_a )
UpperCAmelCase = os.path.join(_a , VOCAB_FILES_NAMES['''vocab_file'''] )
print(F"Generating {src_vocab_file} of {src_vocab_size} records" )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_a , ensure_ascii=_a , indent=_a ) )
# merges_file (bpecodes)
UpperCAmelCase = os.path.join(_a , '''bpecodes''' )
if not os.path.isfile(_a ):
raise ValueError(F"path to the file {bpecodes_file} does not exist!" )
UpperCAmelCase = os.path.join(_a , VOCAB_FILES_NAMES['''merges_file'''] )
shutil.copyfile(_a , _a )
# model config
UpperCAmelCase = os.path.join(_a , '''config.json''' )
UpperCAmelCase = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1E-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
print(F"Generating {biogpt_model_config_file}" )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_a , ensure_ascii=_a , indent=_a ) )
# tokenizer config
UpperCAmelCase = os.path.join(_a , _a )
UpperCAmelCase = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 1_0_2_4,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(F"Generating {biogpt_tokenizer_config_file}" )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_a , ensure_ascii=_a , indent=_a ) )
# model
UpperCAmelCase = chkpt['''model''']
# remove unneeded keys
UpperCAmelCase = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(_a , _a )
UpperCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('''output_projection.weight''' ):
UpperCAmelCase = model_state_dict.pop(_a )
else:
UpperCAmelCase = model_state_dict.pop(_a )
UpperCAmelCase = BioGptConfig.from_pretrained(_a )
UpperCAmelCase = BioGptForCausalLM(_a )
# check that it loads ok
model_new.load_state_dict(_a )
# save
UpperCAmelCase = os.path.join(_a , _a )
print(F"Generating {pytorch_weights_dump_path}" )
torch.save(_a , _a )
print('''Conversion is done!''' )
if __name__ == "__main__":
A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
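# Typical invocation of the conversion script above (the script file name and
# both paths are placeholders; the two flags come from the argparse definition):
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/fairseq_dump \
#       --pytorch_dump_folder_path /path/to/hf_output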
| 34
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a__ : Any = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :List[str] , **_A :Any ) -> Tuple:
'''simple docstring'''
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self :Any , _A :Union[str, List[str], "Image", List["Image"]] , **_A :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(_A , **_A )
def lowercase_ ( self :Optional[int] , **_A :Dict ) -> Optional[Any]:
'''simple docstring'''
__A = {}
if "candidate_labels" in kwargs:
__A = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__A = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def lowercase_ ( self :Optional[int] , _A :str , _A :str=None , _A :Tuple="This is a photo of {}." ) -> Optional[int]:
'''simple docstring'''
__A = load_image(_A )
__A = self.image_processor(images=[image] , return_tensors=self.framework )
__A = candidate_labels
__A = [hypothesis_template.format(_A ) for x in candidate_labels]
__A = self.tokenizer(_A , return_tensors=self.framework , padding=_A )
__A = [text_inputs]
return inputs
def lowercase_ ( self :List[str] , _A :Tuple ) -> Tuple:
'''simple docstring'''
__A = model_inputs.pop('candidate_labels' )
__A = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , _A ):
__A = text_inputs[0]
else:
# Batching case.
__A = text_inputs[0][0]
__A = self.model(**_A , **_A )
__A = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def lowercase_ ( self :List[str] , _A :Optional[int] ) -> Dict:
'''simple docstring'''
__A = model_outputs.pop('candidate_labels' )
__A = model_outputs['logits'][0]
if self.framework == "pt":
__A = logits.softmax(dim=-1 ).squeeze(-1 )
__A = probs.tolist()
if not isinstance(_A , _A ):
__A = [scores]
elif self.framework == "tf":
__A = stable_softmax(_A , axis=-1 )
__A = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__A = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -_A[0] )
]
return result
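# A minimal usage sketch of the pipeline class above, assuming a standard
# CLIP-style checkpoint (the model name and image path are illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"],
#              hypothesis_template="This is a photo of {}.")
#   # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}],
#   # sorted by descending score as in postprocess() above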
| 161
| 0
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
snake_case_ = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
snake_case_ = {
'ctrl': 256,
}
snake_case_ = {
'Pregnancy': 168629,
'Christianity': 7675,
'Explain': 106423,
'Fitness': 63440,
'Saving': 63163,
'Ask': 27171,
'Ass': 95985,
'Joke': 163509,
'Questions': 45622,
'Thoughts': 49605,
'Retail': 52342,
'Feminism': 164338,
'Writing': 11992,
'Atheism': 192263,
'Netflix': 48616,
'Computing': 39639,
'Opinion': 43213,
'Alone': 44967,
'Funny': 58917,
'Gaming': 40358,
'Human': 4088,
'India': 1331,
'Joker': 77138,
'Diet': 36206,
'Legal': 11859,
'Norman': 4939,
'Tip': 72689,
'Weight': 52343,
'Movies': 46273,
'Running': 23425,
'Science': 2090,
'Horror': 37793,
'Confession': 60572,
'Finance': 12250,
'Politics': 16360,
'Scary': 191985,
'Support': 12654,
'Technologies': 32516,
'Teenage': 66160,
'Event': 32769,
'Learned': 67460,
'Notion': 182770,
'Wikipedia': 37583,
'Books': 6665,
'Extract': 76050,
'Confessions': 102701,
'Conspiracy': 75932,
'Links': 63674,
'Narcissus': 150425,
'Relationship': 54766,
'Relationships': 134796,
'Reviews': 41671,
'News': 4256,
'Translation': 26820,
'multilingual': 128406,
}
def lowerCamelCase__ ( snake_case_ : List[Any] ) -> str:
__snake_case = set()
__snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__snake_case = char
__snake_case = set(snake_case_ )
return pairs
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[Any] = VOCAB_FILES_NAMES
A_ : str = PRETRAINED_VOCAB_FILES_MAP
A_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Tuple = CONTROL_CODES
def __init__(self : Dict , a__ : Dict , a__ : Optional[int] , a__ : Union[str, Any]="<unk>" , **a__ : Dict ):
"""simple docstring"""
super().__init__(unk_token=a__ , **a__ )
with open(a__ , encoding='''utf-8''' ) as vocab_handle:
__snake_case = json.load(a__ )
__snake_case = {v: k for k, v in self.encoder.items()}
with open(a__ , encoding='''utf-8''' ) as merges_handle:
__snake_case = merges_handle.read().split('''\n''' )[1:-1]
__snake_case = [tuple(merge.split() ) for merge in merges]
__snake_case = dict(zip(a__ , range(len(a__ ) ) ) )
__snake_case = {}
@property
def a (self : Dict ):
"""simple docstring"""
return len(self.encoder )
def a (self : Optional[int] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def a (self : str , a__ : str ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__snake_case = tuple(a__ )
__snake_case = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
__snake_case = get_pairs(a__ )
if not pairs:
return token
while True:
__snake_case = min(a__ , key=lambda a__ : self.bpe_ranks.get(a__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__snake_case , __snake_case = bigram
__snake_case = []
__snake_case = 0
while i < len(a__ ):
try:
__snake_case = word.index(a__ , a__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__snake_case = j
if word[i] == first and i < len(a__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__snake_case = tuple(a__ )
__snake_case = new_word
if len(a__ ) == 1:
break
else:
__snake_case = get_pairs(a__ )
__snake_case = '''@@ '''.join(a__ )
__snake_case = word[:-4]
__snake_case = word
return word
def a (self : Optional[Any] , a__ : Tuple ):
"""simple docstring"""
__snake_case = []
__snake_case = re.findall(R'''\S+\n?''' , a__ )
for token in words:
split_tokens.extend(list(self.bpe(a__ ).split(''' ''' ) ) )
return split_tokens
def a (self : Optional[Any] , a__ : Dict ):
"""simple docstring"""
return self.encoder.get(a__ , self.encoder.get(self.unk_token ) )
def a (self : Optional[Any] , a__ : str ):
"""simple docstring"""
return self.decoder.get(a__ , self.unk_token )
def a (self : int , a__ : str ):
"""simple docstring"""
__snake_case = ''' '''.join(a__ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def a (self : int , a__ : str , a__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(a__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case = os.path.join(
a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(
a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(a__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a__ , ensure_ascii=a__ ) + '''\n''' )
__snake_case = 0
with open(a__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a__ : a__[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__snake_case = token_index
writer.write(''' '''.join(a__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
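# A worked trace of the bpe() merge loop above on a toy ranking (hypothetical
# merges; real ranks come from merges.txt):
#   token = "hello"  ->  word = ("h", "e", "l", "l", "o</w>")
#   bpe_ranks = {("l", "l"): 0}          # the only known merge
#   step 1: merge ("l", "l")  ->  ("h", "e", "ll", "o</w>")
#   step 2: no remaining pair is ranked  ->  stop
# The pieces are then joined with "@@ " and the trailing "</w>" stripped,
# giving "h@@ e@@ ll@@ o".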
| 367
|
def solution ( power : int = 1000 ) -> int:
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
print('Sum of the digits is: ', result)
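# The same digit sum written as a one-liner, equivalent to solution() above:
#   sum(int(digit) for digit in str(2**power))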
| 238
| 0
|
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Dict = CLIPConfig
lowerCAmelCase_ : Dict = ["CLIPEncoderLayer"]
def __init__( self , a__ ) -> Dict:
'''simple docstring'''
super().__init__(a__ )
snake_case_ = CLIPVisionModelWithProjection(config.vision_config )
snake_case_ = nn.Linear(config.vision_config.projection_dim , 1 )
snake_case_ = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def lowerCAmelCase__ ( self , a__ , a__ , a__=0.5 , a__=0.5 ) -> Any:
'''simple docstring'''
snake_case_ = self.vision_model(a__ )[0]
snake_case_ = self.p_head(a__ )
snake_case_ = nsfw_detected.flatten()
snake_case_ = nsfw_detected > p_threshold
snake_case_ = nsfw_detected.tolist()
if any(a__ ):
logger.warning(
"Potential NSFW content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, nsfw_detected_ in enumerate(a__ ):
if nsfw_detected_:
snake_case_ = np.zeros(images[idx].shape )
snake_case_ = self.w_head(a__ )
snake_case_ = watermark_detected.flatten()
snake_case_ = watermark_detected > w_threshold
snake_case_ = watermark_detected.tolist()
if any(a__ ):
logger.warning(
"Potential watermarked content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, watermark_detected_ in enumerate(a__ ):
if watermark_detected_:
snake_case_ = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
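# Illustration of the thresholding above (illustrative numbers): flattened
# projection-head outputs of [0.2, 0.7] with p_threshold = 0.5 give
# nsfw_detected = [False, True], so only the second image in the batch is
# replaced by an all-zero (black) array; the watermark head works the same way.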
| 85
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["input_features", "is_longer"]
def __init__( self : str , A : int=64 , A : Dict=48000 , A : str=480 , A : List[Any]=10 , A : Optional[Any]=1024 , A : Tuple=0.0 , A : List[Any]=False , A : float = 0 , A : float = 14000 , A : int = None , A : str = "fusion" , A : str = "repeatpad" , **A : Dict , ):
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , return_attention_mask=A , **A , )
_UpperCAmelCase : Optional[Any] = top_db
_UpperCAmelCase : Dict = truncation
_UpperCAmelCase : List[Any] = padding
_UpperCAmelCase : Optional[Any] = fft_window_size
_UpperCAmelCase : Dict = (fft_window_size >> 1) + 1
_UpperCAmelCase : Any = hop_length
_UpperCAmelCase : Tuple = max_length_s
_UpperCAmelCase : str = max_length_s * sampling_rate
_UpperCAmelCase : Any = sampling_rate
_UpperCAmelCase : Optional[int] = frequency_min
_UpperCAmelCase : str = frequency_max
_UpperCAmelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A , min_frequency=A , max_frequency=A , sampling_rate=A , norm=A , mel_scale="htk" , )
_UpperCAmelCase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A , min_frequency=A , max_frequency=A , sampling_rate=A , norm="slaney" , mel_scale="slaney" , )
def _A ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _A ( self : Optional[Any] , A : np.array , A : Optional[np.array] = None ):
_UpperCAmelCase : Dict = spectrogram(
A , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=A , log_mel="dB" , )
return log_mel_spectrogram.T
def _A ( self : str , A : str , A : List[str] , A : List[Any] ):
_UpperCAmelCase : List[str] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : Optional[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : Tuple = [0]
# randomly choose index for each part
_UpperCAmelCase : Dict = np.random.choice(ranges[0] )
_UpperCAmelCase : str = np.random.choice(ranges[1] )
_UpperCAmelCase : Tuple = np.random.choice(ranges[2] )
_UpperCAmelCase : str = mel[idx_front : idx_front + chunk_frames, :]
_UpperCAmelCase : str = mel[idx_middle : idx_middle + chunk_frames, :]
_UpperCAmelCase : List[Any] = mel[idx_back : idx_back + chunk_frames, :]
_UpperCAmelCase : Dict = torch.tensor(mel[None, None, :] )
_UpperCAmelCase : Optional[Any] = torch.nn.functional.interpolate(
A , size=[chunk_frames, 64] , mode="bilinear" , align_corners=A )
_UpperCAmelCase : List[str] = mel_shrink[0][0].numpy()
_UpperCAmelCase : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _A ( self : List[Any] , A : np.array , A : List[str] , A : Any , A : Optional[int] ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_UpperCAmelCase : int = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_UpperCAmelCase : str = len(A ) - max_length
_UpperCAmelCase : str = np.random.randint(0 , overflow + 1 )
_UpperCAmelCase : int = waveform[idx : idx + max_length]
_UpperCAmelCase : Any = self._np_extract_fbank_features(A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_UpperCAmelCase : Tuple = self._np_extract_fbank_features(A , self.mel_filters )
_UpperCAmelCase : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_UpperCAmelCase : Optional[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_UpperCAmelCase : Any = np.stack([mel, mel, mel, mel] , axis=0 )
_UpperCAmelCase : int = False
else:
_UpperCAmelCase : Tuple = self._random_mel_fusion(A , A , A )
_UpperCAmelCase : Any = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
_UpperCAmelCase : Optional[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_UpperCAmelCase : str = int(max_length / len(A ) )
_UpperCAmelCase : Dict = np.stack(np.tile(A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_UpperCAmelCase : Dict = int(max_length / len(A ) )
_UpperCAmelCase : List[str] = np.stack(np.tile(A , A ) )
_UpperCAmelCase : Optional[Any] = np.pad(A , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
_UpperCAmelCase : str = self._np_extract_fbank_features(A , self.mel_filters )
_UpperCAmelCase : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_UpperCAmelCase : List[str] = self._np_extract_fbank_features(A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A : str = None , A : Optional[str] = None , A : Optional[int] = None , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , **A : List[str] , ):
_UpperCAmelCase : int = truncation if truncation is not None else self.truncation
_UpperCAmelCase : Optional[int] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase : Any = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase : Optional[Any] = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : int = [np.asarray(A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
_UpperCAmelCase : List[str] = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : List[str] = [np.asarray(A )]
# convert to mel spectrogram, truncate and pad if needed.
_UpperCAmelCase : Dict = [
self._get_input_mel(A , max_length if max_length else self.nb_max_samples , A , A )
for waveform in raw_speech
]
_UpperCAmelCase : int = []
_UpperCAmelCase : Optional[Any] = []
for mel, longer in padded_inputs:
input_mel.append(A )
is_longer.append(A )
if truncation == "fusion" and sum(A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_UpperCAmelCase : Union[str, Any] = np.random.randint(0 , len(A ) )
_UpperCAmelCase : Optional[Any] = True
if isinstance(input_mel[0] , A ):
_UpperCAmelCase : List[str] = [np.asarray(A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_UpperCAmelCase : Tuple = [[longer] for longer in is_longer]
_UpperCAmelCase : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
_UpperCAmelCase : Tuple = BatchFeature(A )
if return_tensors is not None:
_UpperCAmelCase : List[Any] = input_features.convert_to_tensors(A )
return input_features
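# Shape walk-through of the "fusion" truncation path above, using the default
# hyperparameters (clip length is illustrative): max_length = 10 s * 48000 Hz =
# 480000 samples and hop_length = 480 give chunk_frames = 480000 // 480 + 1 = 1001.
# For a longer clip whose mel spectrogram has, say, 3000 frames, three 1001x64
# chunks are cropped from the front/middle/back ranges and stacked with a
# bilinear shrink of the full mel to 1001x64, yielding a (4, 1001, 64)
# "mel_fusion" input and is_longer = True.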
| 31
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self , __magic_name__ , __magic_name__=13 , __magic_name__=30 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10 , __magic_name__=0.0_2 , __magic_name__=None , ) -> Dict:
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = type_sequence_label_size
_a = initializer_range
_a = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a = (image_size // patch_size) ** 2
_a = num_patches + 1
def __UpperCAmelCase ( self ) -> List[str]:
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ) -> Optional[Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[Any]:
_a = ViTMSNModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = self.type_sequence_label_size
_a = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , labels=__magic_name__ )
print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print(f'Labels: {labels}' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a = 1
_a = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCAmelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_lowerCAmelCase = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __UpperCAmelCase ( self ) -> int:
_a = ViTMSNModelTester(self )
_a = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __UpperCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> Optional[int]:
pass
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __UpperCAmelCase ( self ) -> List[str]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__magic_name__ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['pixel_values']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __UpperCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def __UpperCAmelCase ( self ) -> int:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = ViTMSNModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _A () -> Dict:
'''simple docstring'''
_a = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ) -> Any:
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ) -> int:
torch.manual_seed(2 )
_a = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(__magic_name__ )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__magic_name__ , return_tensors='pt' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
_a = model(**__magic_name__ )
# verify the logits
_a = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
_a = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
| 104
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
a_ : str = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """git_vision_model"""
def __init__( self , __magic_name__=7_68 , __magic_name__=30_72 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=2_24 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.0_2 , **__magic_name__ , ) -> Union[str, Any]:
super().__init__(**__magic_name__ )
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
_a = num_channels
_a = patch_size
_a = image_size
_a = initializer_range
_a = attention_dropout
_a = layer_norm_eps
_a = hidden_act
@classmethod
def __UpperCAmelCase ( cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
_a , _a = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
_a = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """git"""
def __init__( self , __magic_name__=None , __magic_name__=3_05_22 , __magic_name__=7_68 , __magic_name__=6 , __magic_name__=12 , __magic_name__=30_72 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10_24 , __magic_name__=0.0_2 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=False , __magic_name__=1_01 , __magic_name__=1_02 , __magic_name__=None , **__magic_name__ , ) -> Optional[int]:
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , pad_token_id=__magic_name__ , **__magic_name__ )
if vision_config is None:
_a = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
_a = GitVisionConfig(**__magic_name__ )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = use_cache
_a = tie_word_embeddings
_a = num_image_with_embedding
_a = bos_token_id
_a = eos_token_id
def __UpperCAmelCase ( self ) -> List[str]:
_a = copy.deepcopy(self.__dict__ )
_a = self.vision_config.to_dict()
_a = self.__class__.model_type
return output
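# Usage sketch for the from_pretrained override on the vision config above:
# GitVisionConfig can load straight from a composite GIT checkpoint, in which
# case the nested "vision_config" entry is extracted automatically, e.g.:
#   GitVisionConfig.from_pretrained("microsoft/git-base")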
| 104
| 1
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __snake_case ( lowerCAmelCase ):
_a : BigBirdConfig
_a : jnp.dtype= jnp.floataa
_a : bool= True
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setup()
lowercase : List[str] = nn.Dense(5 ,dtype=self.dtype )
def __call__( self ,*snake_case ,**snake_case ):
'''simple docstring'''
lowercase : int = super().__call__(*snake_case ,**snake_case )
lowercase : Any = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __snake_case ( lowerCAmelCase ):
_a : List[Any]= FlaxBigBirdForNaturalQuestionsModule
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
def cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : int = logits.shape[-1]
lowercase : Dict = (labels[..., None] == jnp.arange(SCREAMING_SNAKE_CASE__ )[None]).astype("""f4""" )
lowercase : Any = jax.nn.log_softmax(SCREAMING_SNAKE_CASE__ , axis=-1 )
lowercase : Optional[Any] = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowercase : Any = reduction(SCREAMING_SNAKE_CASE__ )
return loss
lowercase : Optional[Any] = partial(SCREAMING_SNAKE_CASE__ , reduction=jnp.mean )
lowercase : Optional[int] = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Dict = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : int = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return (start_loss + end_loss + pooled_loss) / 3
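# In symbols, the helper above computes standard one-hot cross entropy per head:
#   CE(logits, y) = -sum_c onehot(y)_c * log_softmax(logits)_c
# reduced with jnp.mean over the batch, and the returned loss is the average
# (CE_start + CE_end + CE_pooled) / 3 across the start, end, and category heads.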
@dataclass
class __snake_case :
_a : str= "google/bigbird-roberta-base"
_a : int= 3000
_a : int= 1_0500
_a : int= 128
_a : int= 3
_a : int= 1
_a : int= 5
# tx_args
_a : float= 3E-5
_a : float= 0.0
_a : int= 2_0000
_a : float= 0.00_95
_a : str= "bigbird-roberta-natural-questions"
_a : str= "training-expt"
_a : str= "data/nq-training.jsonl"
_a : str= "data/nq-validation.jsonl"
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
os.makedirs(self.base_dir ,exist_ok=snake_case )
lowercase : Optional[int] = os.path.join(self.base_dir ,self.save_dir )
lowercase : Optional[int] = self.batch_size_per_device * jax.device_count()
@dataclass
class __snake_case :
_a : int
_a : int= 4096 # no dynamic padding on TPUs
def __call__( self ,snake_case ):
'''simple docstring'''
lowercase : int = self.collate_fn(snake_case )
lowercase : Union[str, Any] = jax.tree_util.tree_map(snake_case ,snake_case )
return batch
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase , lowercase : Union[str, Any] = self.fetch_inputs(features["""input_ids"""] )
lowercase : Tuple = {
"""input_ids""": jnp.array(snake_case ,dtype=jnp.intaa ),
"""attention_mask""": jnp.array(snake_case ,dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] ,dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] ,dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] ,dtype=jnp.intaa ),
}
return batch
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Tuple = [self._fetch_inputs(snake_case ) for ids in input_ids]
return zip(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Union[str, Any] = [1 for _ in range(len(snake_case ) )]
while len(snake_case ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> Any:
if seed is not None:
lowercase : Optional[int] = dataset.shuffle(seed=SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) // batch_size ):
lowercase : Optional[Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(SCREAMING_SNAKE_CASE__ )
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,  # assumption: the NQ loss defined earlier in this module
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # Apply weight decay to everything except biases and LayerNorm scales.
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
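# Usage sketch for the optimizer factory above; the step counts are hypothetical
# and only illustrate the shape of the schedule (linear warmup from init_lr to lr,
# then linear decay towards 1e-7).
if __name__ == "__main__":
    tx, lr_schedule = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000, weight_decay=0.0095)
    print(lr_schedule(0), lr_schedule(100), lr_schedule(999))  # ~0.0, 3e-5, ~1e-7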
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
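# Usage sketch (illustrative): a bare package name only checks that the
# distribution is installed; an operator clause also checks the version, and
# several clauses can be combined with commas.
if __name__ == "__main__":
    require_version("packaging")  # presence-only check (imported above, so installed)
    require_version("packaging>=20.0", hint="pip install -U packaging")  # versioned check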
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__snake_case = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
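# Quick illustration (hypothetical data): a 5 second clip at 16 kHz cropped to at
# most 2 seconds, i.e. 32000 samples.
#     wav = np.zeros(5 * 16000)
#     assert len(random_subsample(wav, max_length=2.0)) == 2 * 16000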
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'''Make sure to set `--label_column_name` to the correct text column - one of '''
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
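# Invocation sketch (hypothetical dataset and output paths; the flags map onto
# the dataclass fields above):
#     python run_audio_classification.py \
#         --model_name_or_path facebook/wav2vec2-base \
#         --dataset_name superb --dataset_config_name ks \
#         --output_dir wav2vec2-base-ft-keyword-spotting \
#         --do_train --do_eval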
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
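# Usage sketch (hypothetical device ids): spread 12 attention blocks over 3 devices.
if __name__ == "__main__":
    device_map = get_device_map(n_layers=12, devices=[0, 1, 2])
    # -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
    assert_device_map(device_map, num_blocks=12)  # passes: no duplicates, gaps or extras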
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
__lowercase = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
__lowercase = text_generator('This is a test' , do_sample=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
__lowercase = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
_UpperCAmelCase , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
__lowercase = text_generator('This is a test' , do_sample=_UpperCAmelCase , num_return_sequences=2 , return_tensors=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{'generated_token_ids': ANY(_UpperCAmelCase )},
{'generated_token_ids': ANY(_UpperCAmelCase )},
] , )
__lowercase = text_generator.model.config.eos_token_id
__lowercase = '<pad>'
__lowercase = text_generator(
['This is a test', 'This is a second test'] , do_sample=_UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=_UpperCAmelCase , )
self.assertEqual(
_UpperCAmelCase , [
[
{'generated_token_ids': ANY(_UpperCAmelCase )},
{'generated_token_ids': ANY(_UpperCAmelCase )},
],
[
{'generated_token_ids': ANY(_UpperCAmelCase )},
{'generated_token_ids': ANY(_UpperCAmelCase )},
],
] , )
@require_tf
    def test_small_model_tf(self):
__lowercase = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
__lowercase = text_generator('This is a test' , do_sample=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
__lowercase = text_generator(['This is a test', 'This is a second test'] , do_sample=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence(self):
__lowercase = 'Hello I believe in'
__lowercase = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
__lowercase = text_generator(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
__lowercase = text_generator(_UpperCAmelCase , stop_sequence=' fe' )
self.assertEqual(_UpperCAmelCase , [{'generated_text': 'Hello I believe in fe'}] )
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
__lowercase = text_generator('This is a test' )
self.assertEqual(_UpperCAmelCase , [{'generated_text': ANY(_UpperCAmelCase )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
__lowercase = text_generator('This is a test' , return_full_text=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [{'generated_text': ANY(_UpperCAmelCase )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
__lowercase = pipeline(task='text-generation' , model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , return_full_text=_UpperCAmelCase )
__lowercase = text_generator('This is a test' )
self.assertEqual(_UpperCAmelCase , [{'generated_text': ANY(_UpperCAmelCase )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
__lowercase = text_generator('This is a test' , return_full_text=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [{'generated_text': ANY(_UpperCAmelCase )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
__lowercase = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
[{'generated_text': ANY(_UpperCAmelCase )}, {'generated_text': ANY(_UpperCAmelCase )}],
[{'generated_text': ANY(_UpperCAmelCase )}, {'generated_text': ANY(_UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__lowercase = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
[{'generated_text': ANY(_UpperCAmelCase )}, {'generated_text': ANY(_UpperCAmelCase )}],
[{'generated_text': ANY(_UpperCAmelCase )}, {'generated_text': ANY(_UpperCAmelCase )}],
] , )
with self.assertRaises(_UpperCAmelCase ):
__lowercase = text_generator('test' , return_full_text=_UpperCAmelCase , return_text=_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase ):
__lowercase = text_generator('test' , return_full_text=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase ):
__lowercase = text_generator('test' , return_text=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__lowercase = text_generator('' )
self.assertEqual(_UpperCAmelCase , [{'generated_text': ANY(_UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__lowercase = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 5_00 , max_new_tokens=20 )
__lowercase = text_generator('This is a test' * 5_00 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_UpperCAmelCase ):
text_generator(
'This is a test' * 5_00 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        outputs = pipe("This is a test")
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        outputs = pipe("This is a test")
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        outputs = pipe("This is a test")
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
__lowercase = 'Hello world'
__lowercase = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
__lowercase = logging.get_logger('transformers.generation.tf_utils' )
else:
__lowercase = logging.get_logger('transformers.generation.utils' )
        __lowercase = 'Both `max_new_tokens`'  # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_UpperCAmelCase ) as cl:
__lowercase = text_generator(_UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(_UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_UpperCAmelCase ) as cl:
__lowercase = text_generator(_UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(_UpperCAmelCase , cl.out )
with CaptureLogger(_UpperCAmelCase ) as cl:
__lowercase = text_generator(_UpperCAmelCase , max_length=10 )
self.assertNotIn(_UpperCAmelCase , cl.out )
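# Usage sketch outside the test harness (tiny model id taken from the tests above):
#     generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#     generator("Hello I believe in", max_new_tokens=5, do_sample=False)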
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in list. Returns -1 if element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
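# Note on the hand-off to lin_search (sketch): with precision = 10, any window
# narrower than 10 elements is finished by a linear scan, so short lists are
# effectively scanned linearly; the two ternary split points only matter once
# the search window is at least `precision` wide.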
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by comma:\n""").strip()
SCREAMING_SNAKE_CASE__ = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
SCREAMING_SNAKE_CASE__ = int(input("""Enter the number to be found in the list:\n""").strip())
SCREAMING_SNAKE_CASE__ = ite_ternary_search(collection, target)
SCREAMING_SNAKE_CASE__ = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'''Iterative search: {target} found at positions: {resulta}''')
print(F'''Recursive search: {target} found at positions: {resulta}''')
else:
print("""Not found""")
from math import factorial


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    return sum(int(x) for x in str(factorial(num)))
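# Worked example: solution(10) == 27, since 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.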
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for simple sequence classification."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()

        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: tf.data.Dataset
    def __init__(
        self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str,
        max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )
    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,
            padding="max_length", truncation=True, return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
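# Usage sketch (hypothetical data_dir and tokenizer): build features for the
# HANS evaluation set with the helpers above.
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     examples = HansProcessor().get_dev_examples("path/to/hans")
#     features = hans_convert_examples_to_features(
#         examples, ["contradiction", "entailment", "neutral"], 128, tokenizer
#     )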
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
__UpperCAmelCase : int = 4
__UpperCAmelCase : List[Any] = 2
__UpperCAmelCase : Optional[int] = generate_all_combinations(n, k)
print_all_state(total_list)
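# Expected output for n=4, k=2 (the six 2-subsets of {1, 2, 3, 4} in
# lexicographic order):
#     1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4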
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """
    Base class for text model's outputs that also contains a pooling of the last hidden states.
    """

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512,
        pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.base_model = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            # Project the penultimate hidden state after a dedicated LayerNorm.
            sequence_output = self.pre_LN(outputs["hidden_states"][-2])
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
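# Usage sketch (hypothetical checkpoint name): the wrapper projects the encoder's
# last hidden state down to config.project_dim and returns it as projection_state.
#     config = RobertaSeriesConfig.from_pretrained("some/alt-text-encoder")
#     model = RobertaSeriesModelWithTransformation.from_pretrained("some/alt-text-encoder")
#     out = model(input_ids=input_ids, attention_mask=attention_mask)
#     text_embedding = out.projection_state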
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target; `data` is the loaded sklearn bunch.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
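# Usage sketch (hypothetical toy arrays): fit on features of shape
# (n_samples, n_features) and integer class labels.
#     clf = xgboost(np.array([[5.1, 3.5, 1.4, 0.2], [6.2, 2.8, 4.8, 1.8]]), np.array([0, 1]))
#     clf.predict(np.array([[5.0, 3.4, 1.5, 0.2]]))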
def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
import pytest
_UpperCAmelCase : List[Any] = "__dummy_dataset1__"
_UpperCAmelCase : Union[str, Any] = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
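# Usage sketch (hypothetical test): pytest injects fixtures by name, so a test can
# load the dummy builder straight from the generated script directory.
#     def test_dummy_dataset(dataset_loading_script_dir):
#         from datasets import load_dataset
#         ds = load_dataset(dataset_loading_script_dir)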
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
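# Worked example (verified by hand): for the 3x3 maze
#     [[0, 1, 0],
#      [0, 0, 0],
#      [1, 0, 0]]
# (0 = open, 1 = blocked), run_maze marks the path
#     [[1, 0, 0],
#      [1, 1, 0],
#      [0, 1, 1]]
# and solve_maze returns True.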
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        # NOTE: the tester deliberately overrides the arguments with fixed values below.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list) -> None:
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
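
# Resulting layout (sketch): tmp_dir/{train,val,test}.source each holding ARTICLES and
# tmp_dir/{train,val,test}.target each holding SUMMARIES, one document per line.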
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def a (self : Union[str, Any] , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(a__ )
__snake_case = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__snake_case = max(len(tokenizer.encode(a__ ) ) for a in ARTICLES )
__snake_case = max(len(tokenizer.encode(a__ ) ) for a in SUMMARIES )
__snake_case = 4
__snake_case = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__snake_case , __snake_case = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
__snake_case = SeqaSeqDataset(
a__ , data_dir=a__ , type_path='''train''' , max_source_length=a__ , max_target_length=a__ , src_lang=a__ , tgt_lang=a__ , )
__snake_case = DataLoader(a__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(a__ , a__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__snake_case = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def a (self : Tuple , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(a__ )
__snake_case = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__snake_case = max(len(tokenizer.encode(a__ ) ) for a in ARTICLES )
__snake_case = max(len(tokenizer.encode(a__ ) ) for a in SUMMARIES )
__snake_case = 4
__snake_case = LegacySeqaSeqDataset(
a__ , data_dir=a__ , type_path='''train''' , max_source_length=20 , max_target_length=a__ , )
__snake_case = DataLoader(a__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def a (self : int ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
__snake_case = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__snake_case = tmp_dir.joinpath('''train.source''' ).open().readlines()
__snake_case = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(a__ , a__ , 128 , a__ )
__snake_case = {x.name for x in tmp_dir.iterdir()}
__snake_case = {x.name for x in save_dir.iterdir()}
__snake_case = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(a__ ) < len(a__ )
assert len(a__ ) == 1
assert len(packed_examples[0] ) == sum(len(a__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def a (self : Any ):
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
__snake_case , __snake_case , __snake_case = self._get_dataset(max_len=64 )
__snake_case = 64
__snake_case = ds.make_dynamic_sampler(a__ , required_batch_size_multiple=a__ )
__snake_case = [len(a__ ) for x in batch_sampler]
assert len(set(a__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(a__ ) == len(a__ ) # no dropped or added examples
__snake_case = DataLoader(a__ , batch_sampler=a__ , collate_fn=ds.collate_fn , num_workers=2 )
__snake_case = []
__snake_case = []
for batch in data_loader:
__snake_case = batch['''input_ids'''].shape
__snake_case = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__snake_case = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(a__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(a__ )
assert num_src_per_batch[0] == max(a__ )
if failures:
raise AssertionError(f"""too many tokens in {len(a__ )} batches""" )
def a (self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._get_dataset(max_len=512 )
__snake_case = 2
__snake_case = ds.make_sortish_sampler(a__ , shuffle=a__ )
__snake_case = DataLoader(a__ , batch_size=a__ , collate_fn=ds.collate_fn , num_workers=2 )
__snake_case = DataLoader(a__ , batch_size=a__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=a__ )
__snake_case = tokenizer.pad_token_id
def count_pad_tokens(a__ : List[str] , a__ : List[Any]="input_ids" ):
return [batch[k].eq(a__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(a__ , k='''labels''' ) ) < sum(count_pad_tokens(a__ , k='''labels''' ) )
assert sum(count_pad_tokens(a__ ) ) < sum(count_pad_tokens(a__ ) )
assert len(a__ ) == len(a__ )
def a (self : int , a__ : Optional[Any]=1000 , a__ : Optional[Any]=128 ):
"""simple docstring"""
if os.getenv('''USE_REAL_DATA''' , a__ ):
__snake_case = '''examples/seq2seq/wmt_en_ro'''
__snake_case = max_len * 2 * 64
if not Path(a__ ).joinpath('''train.len''' ).exists():
save_len_file(a__ , a__ )
else:
__snake_case = '''examples/seq2seq/test_data/wmt_en_ro'''
__snake_case = max_len * 4
save_len_file(a__ , a__ )
__snake_case = AutoTokenizer.from_pretrained(a__ )
__snake_case = SeqaSeqDataset(
a__ , data_dir=a__ , type_path='''train''' , max_source_length=a__ , max_target_length=a__ , n_obs=a__ , )
return ds, max_tokens, tokenizer
def a (self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._get_dataset()
__snake_case = set(DistributedSortishSampler(a__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=a__ ) )
__snake_case = set(DistributedSortishSampler(a__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=a__ ) )
assert idsa.intersection(a__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def a (self : Optional[int] , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(a__ , use_fast=a__ )
if tok_name == MBART_TINY:
__snake_case = SeqaSeqDataset(
a__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
__snake_case = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__snake_case = SeqaSeqDataset(
a__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
__snake_case = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(a__ ) == 1 if tok_name == BART_TINY else len(a__ ) == 0
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # Resize the raw depth map back to the input image resolution.
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
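
# Usage sketch (the checkpoint name and image URL are assumptions, not part of this module):
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   outputs["depth"].save("depth.png")  # PIL image; outputs["predicted_depth"] is the raw tensor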
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k: str, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
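
# Worked example (illustrative): with DECODER_PATTERNS, the TF key
#   "pegasus/decoder/layer_0/attention/self/query/kernel"
# is rewritten step by step ("/" -> ".", "layer_" -> "layers.", "kernel" -> "weight",
# "pegasus" -> "model", "attention.self" -> "self_attn", "query" -> "q_proj") into
#   "model.decoder.layers.0.self_attn.q_proj.weight"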
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
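
# Example invocation (sketch; the script filename and paths are placeholders):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
#       --save_dir ./bigbird-pegasus-converted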
def wave(txt: str) -> list:
    """Return the "wave" of a string: each alphabetic character capitalized in turn."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("""doctest""").testmod()
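
    # Example (sketch): each alphabetic position is capitalized in turn.
    print(wave("hello"))  # ['Hello', 'hEllo', 'heLlo', 'helLo', 'hellO']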
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30_522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3_072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
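
# Usage sketch (hedged): two blocks of two layers each give num_hidden_layers == 4
# and num_blocks == 2; "hidden_size" transparently maps to "d_model" via attribute_map.
#
#   config = FunnelConfig(block_sizes=[2, 2], d_model=128, n_head=4)
#   assert config.num_hidden_layers == 4 and config.num_blocks == 2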
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 200_0000) -> int:
    """Return the sum of all primes below ``n`` (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f'{solution() = }')
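
    # Quick sanity check (sketch): the generator yields the primes in order.
    from itertools import islice

    assert list(islice(prime_generator(), 5)) == [2, 3, 5, 7, 11]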
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=16 , lowerCAmelCase__=36 , lowerCAmelCase__=6 , lowerCAmelCase__=6 , lowerCAmelCase__=6 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Tuple:
'''simple docstring'''
lowercase__: List[str] = parent
lowercase__: List[Any] = batch_size
lowercase__: List[str] = seq_length
lowercase__: Any = is_training
lowercase__: Dict = use_input_mask
lowercase__: List[str] = use_token_type_ids
lowercase__: str = use_labels
lowercase__: int = vocab_size
lowercase__: List[str] = embedding_size
lowercase__: Dict = hidden_size
lowercase__: Any = num_hidden_layers
lowercase__: List[str] = num_hidden_groups
lowercase__: int = num_attention_heads
lowercase__: int = intermediate_size
lowercase__: Optional[int] = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: Optional[Any] = max_position_embeddings
lowercase__: Optional[Any] = type_vocab_size
lowercase__: List[Any] = type_sequence_label_size
lowercase__: List[str] = initializer_range
lowercase__: Any = num_labels
lowercase__: Optional[Any] = num_choices
lowercase__: str = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: Tuple = None
if self.use_input_mask:
lowercase__: Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: List[str] = None
if self.use_token_type_ids:
lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: int = None
lowercase__: Union[str, Any] = None
lowercase__: str = None
if self.use_labels:
lowercase__: Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: Any = AlbertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__: Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowercase__: Any = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowercase__: List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
lowercase__: Dict = AlbertForPreTraining(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__: Optional[Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , sentence_order_label=__lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
lowercase__: Dict = AlbertForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__: List[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
lowercase__: Optional[int] = AlbertForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__: str = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
lowercase__: Optional[Any] = self.num_labels
lowercase__: str = AlbertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__: Optional[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
lowercase__: List[str] = self.num_labels
lowercase__: Optional[int] = AlbertForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__: str = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
lowercase__: List[Any] = self.num_choices
lowercase__: Dict = AlbertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__: Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int:
'''simple docstring'''
lowercase__: List[str] = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
lowercase__: Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase )
lowercase__: List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Dict = AlbertModelTester(self )
lowercase__: Union[str, Any] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__: List[str] = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__: Tuple = AlbertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Tuple = AlbertModel.from_pretrained('albert-base-v2' )
lowercase__: Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
lowercase__: str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__: Optional[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
lowercase__: List[Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __lowerCAmelCase )
lowercase__: Union[str, Any] = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) )
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
_UpperCAmelCase = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
_UpperCAmelCase = (
hasattr(__lowerCAmelCase , """architectures""" )
and isinstance(config.architectures , __lowerCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model_cls(__lowerCAmelCase )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_UpperCAmelCase = TF_MODEL_MAPPING[config.__class__](__lowerCAmelCase )
# encoder-decoder has vocab size saved differently
_UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size
_UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , training=__lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__lowerCAmelCase , training=__lowerCAmelCase )
_UpperCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
_UpperCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
_UpperCAmelCase = (
hasattr(__lowerCAmelCase , """architectures""" )
and isinstance(config.architectures , __lowerCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_UpperCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_UpperCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = model_cls(__lowerCAmelCase )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_UpperCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCAmelCase )
# encoder-decoder has vocab size saved differently
_UpperCAmelCase = config.vocab_size if hasattr(__lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size
_UpperCAmelCase = random_input_ids(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_UpperCAmelCase = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0]
_UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )[0]
_UpperCAmelCase = tf.gradients(__lowerCAmelCase , model.trainable_variables )
return gradients
_UpperCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` between the volume units listed in METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
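

# Quick sanity checks, derived directly from the table above:
# 4 cubic metres -> litres: 4 * 1 * 1000
assert volume_conversion(4, "cubicmeter", "litre") == 4000
# 1 litre -> kilolitres: 1 * 0.001 * 1
assert volume_conversion(1, "litre", "kilolitre") == 0.001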
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class LinearAlgebraTestCase(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for * operator"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self) -> None:
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
"""Aho-Corasick automaton for multi-keyword string search."""
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
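

if __name__ == "__main__":
    # Usage sketch: build the automaton once from the keyword list, then
    # search a string; the result maps each keyword to the start indices of
    # its occurrences.
    automaton = Automaton(["what", "hat", "ver", "er"])
    print(automaton.search_in("whatever, err ... , wherever"))
    # {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}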
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    """Return x unchanged if it is already iterable, otherwise duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class a :
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
pass
def __UpperCAmelCase ( self ) -> Any:
pass
def __UpperCAmelCase ( self ) -> List[Any]:
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Tuple:
_a = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__ , __magic_name__ )
_a = FlaxVisionTextDualEncoderModel(__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Optional[Any]:
_a , _a = self.get_vision_text_model(__magic_name__ , __magic_name__ )
_a = {'vision_model': vision_model, 'text_model': text_model}
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Union[str, Any]:
_a , _a = self.get_vision_text_model(__magic_name__ , __magic_name__ )
_a = {'vision_model': vision_model, 'text_model': text_model}
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
_a = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
_a = after_output[0]
_a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__ , 1e-3 )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Any:
_a , _a = self.get_vision_text_model(__magic_name__ , __magic_name__ )
_a = {'vision_model': vision_model, 'text_model': text_model}
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
_a = model(
input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , output_attentions=__magic_name__ )
_a = output.vision_model_output.attentions
self.assertEqual(len(__magic_name__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_a = output.text_model_output.attentions
self.assertEqual(len(__magic_name__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
pt_model.to(__magic_name__ )
pt_model.eval()
# prepare inputs
_a = inputs_dict
_a = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_a = pt_model(**__magic_name__ ).to_tuple()
_a = fx_model(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__magic_name__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ , from_pt=__magic_name__ )
_a = fx_model_loaded(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__magic_name__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__magic_name__ )
_a = VisionTextDualEncoderModel.from_pretrained(__magic_name__ , from_flax=__magic_name__ )
pt_model_loaded.to(__magic_name__ )
pt_model_loaded.eval()
with torch.no_grad():
_a = pt_model_loaded(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__magic_name__ , pt_output_loaded.numpy() , 4e-2 )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
_a = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__ , __magic_name__ )
_a = VisionTextDualEncoderModel(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel(__magic_name__ )
_a = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __magic_name__ )
_a = fx_state
self.check_pt_flax_equivalence(__magic_name__ , __magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__ , __magic_name__ )
_a = VisionTextDualEncoderModel(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel(__magic_name__ )
_a = load_flax_weights_in_pytorch_model(__magic_name__ , fx_model.params )
self.check_pt_flax_equivalence(__magic_name__ , __magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__magic_name__ )
def __UpperCAmelCase ( self ) -> Dict:
_a = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.prepare_config_and_inputs()
self.check_save_load(**__magic_name__ )
def __UpperCAmelCase ( self ) -> Dict:
_a = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__magic_name__ )
@is_pt_flax_cross_test
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.prepare_config_and_inputs()
_a = config_inputs_dict.pop('vision_config' )
_a = config_inputs_dict.pop('text_config' )
_a = config_inputs_dict
self.check_equivalence_pt_to_flax(__magic_name__ , __magic_name__ , __magic_name__ )
self.check_equivalence_flax_to_pt(__magic_name__ , __magic_name__ , __magic_name__ )
@slow
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a , _a = self.get_pretrained_model_and_inputs()
_a = model_a(**__magic_name__ )
_a = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ )
_a = model_a(**__magic_name__ )
_a = after_outputs[0]
_a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__ , 1e-5 )
@require_flax
class a ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
def __UpperCAmelCase ( self ) -> List[str]:
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=__magic_name__ , text_from_pt=__magic_name__ , )
_a = 13
_a = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_a = random_attention_mask([batch_size, 4] )
_a = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Optional[int]:
_a = FlaxViTModel(__magic_name__ )
_a = FlaxBertModel(__magic_name__ )
return vision_model, text_model
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = FlaxViTModelTester(self )
_a = FlaxBertModelTester(self )
_a = vit_model_tester.prepare_config_and_inputs()
_a = bert_model_tester.prepare_config_and_inputs()
_a , _a = vision_config_and_inputs
_a , _a , _a , _a = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class a ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
def __UpperCAmelCase ( self ) -> Any:
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=__magic_name__ , text_from_pt=__magic_name__ , )
_a = 13
_a = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_a = random_attention_mask([batch_size, 4] )
_a = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = FlaxCLIPVisionModel(__magic_name__ )
_a = FlaxBertModel(__magic_name__ )
return vision_model, text_model
def __UpperCAmelCase ( self ) -> Tuple:
_a = FlaxCLIPVisionModelTester(self )
_a = FlaxBertModelTester(self )
_a = clip_model_tester.prepare_config_and_inputs()
_a = bert_model_tester.prepare_config_and_inputs()
_a , _a = vision_config_and_inputs
_a , _a , _a , _a = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class a ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self ) -> Tuple:
_a = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
_a = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
_a = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=__magic_name__ , padding=__magic_name__ , return_tensors='np' )
_a = model(**__magic_name__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        _a = np.array([[1.2284727, 0.3104122]])
self.assertTrue(np.allclose(outputs.logits_per_image , __magic_name__ , atol=1e-3 ) )
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = StableUnCLIPPipeline
__snake_case : Optional[int] = TEXT_TO_IMAGE_PARAMS
__snake_case : int = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__snake_case : Tuple = False
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
        embedder_hidden_size = 32
        embedder_projection_dim = 32
# prior components
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase_ , projection_dim=UpperCAmelCase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCAmelCase_ , num_layers=1 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_000 , clip_sample=UpperCAmelCase_ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
        _SCREAMING_SNAKE_CASE = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase_ , layers_per_block=1 , upcast_attention=UpperCAmelCase_ , use_linear_projection=UpperCAmelCase_ , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = DDIMScheduler(
            beta_schedule="""scaled_linear""" , beta_start=0.00085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL()
_SCREAMING_SNAKE_CASE = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def UpperCamelCase ( self: str , UpperCAmelCase_: int , UpperCAmelCase_: Tuple=0 ):
'''simple docstring'''
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase_ )
@slow
@require_torch_gpu
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
        _SCREAMING_SNAKE_CASE = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _SCREAMING_SNAKE_CASE = pipe("""anime turtle""" , generator=UpperCAmelCase_ , output_type="""np""" )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        _SCREAMING_SNAKE_CASE = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
_SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
_SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a tokenized code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset and return them as lists of dicts."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of the token sets of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset and return the filtered dataset plus the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
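

# Hedged usage sketch: the dataset name below is an illustrative choice, not
# part of this module; any `datasets.Dataset` with "content", "repo_name" and
# "path" columns fits the interface above.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
    ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    print(f"Kept {len(ds_dedup)} of {len(ds)} files after near-deduplication")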
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
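

# Hedged usage sketch: `HfApi().dataset_info` is a real huggingface_hub call,
# but the repo id is an arbitrary public example; the filesystem only sees
# files recorded in the DatasetInfo it was given.
if __name__ == "__main__":
    from huggingface_hub import HfApi

    info = HfApi().dataset_info("glue")
    fs = HfFileSystem(repo_info=info)
    print(fs.ls(""))  # top-level files and directories of the repo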
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return one longest non-decreasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
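

# Sanity checks that follow directly from the definition (only unambiguous
# cases are asserted, since ties between equally long subsequences may
# resolve either way):
assert longest_subsequence([1, 2, 3]) == [1, 2, 3]
assert longest_subsequence([1, 1, 1]) == [1, 1, 1]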
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Apply one step of Conway's Game of Life to a 2D grid of 0/1 cells."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generate one greyscale image per generation, starting from `cells`."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
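

# Text-mode sanity check (no Pillow needed): a blinker oscillates with
# period 2, so two applications of new_generation restore the original board.
assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert new_generation(new_generation(BLINKER)) == BLINKER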
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
"""Project Euler problem 20: sum of the digits of 100!."""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(int(digit) for digit in str(factorial(num)))
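

# Worked example, checkable by hand: 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.
assert solution(10) == 27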
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
"""Patience sort: place elements onto piles with bisect, then heap-merge the piles."""
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort `collection` in place with patience sort and return it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
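

# Deterministic sanity check (the interactive prompt below is unchanged):
assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]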
if __name__ == "__main__":
__snake_case =input("""Enter numbers separated by a comma:\n""").strip()
__snake_case =[int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ :
def __init__( self : Any ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Union[str, Any]=3 ,lowerCamelCase__ : Union[str, Any]=32 ,lowerCamelCase__ : List[str]=3 ,lowerCamelCase__ : Dict=10 ,lowerCamelCase__ : Optional[int]=[10, 20, 30, 40] ,lowerCamelCase__ : int=[1, 1, 2, 1] ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Tuple="relu" ,lowerCamelCase__ : Optional[Any]=3 ,lowerCamelCase__ : int=None ,):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[Any] = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Optional[Any] = embeddings_size
_UpperCamelCase : Union[str, Any] = hidden_sizes
_UpperCamelCase : List[Any] = depths
_UpperCamelCase : Any = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Optional[int] = hidden_act
_UpperCamelCase : Optional[int] = num_labels
_UpperCamelCase : int = scope
_UpperCamelCase : Any = len(__A )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Optional[Any] = None
if self.use_labels:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
_UpperCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = TFResNetModel(config=__A )
_UpperCamelCase : Optional[int] = model(__A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.num_labels
_UpperCamelCase : Optional[int] = TFResNetForImageClassification(__A )
_UpperCamelCase : int = model(__A ,labels=__A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCamelCase : Union[str, Any] = config_and_inputs
_UpperCamelCase : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( A__ , A__ , unittest.TestCase ):
lowercase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase__ = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Dict = TFResNetModelTester(self )
_UpperCamelCase : Union[str, Any] = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(__A )
_UpperCamelCase : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,__A )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : int ,lowerCamelCase__ : str ,lowerCamelCase__ : List[Any] ):
_UpperCamelCase : Any = model_class(__A )
_UpperCamelCase : List[str] = model(**self._prepare_for_class(__A ,__A ) )
_UpperCamelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Any = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : List[Any] = layer_type
_UpperCamelCase : Dict = True
check_hidden_states_output(__A ,__A ,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : int = True
check_hidden_states_output(__A ,__A ,__A )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[Any] = TFResNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def A__ ( ):
_UpperCamelCase : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCamelCase : Union[str, Any] = self.default_image_processor
_UpperCamelCase : Any = prepare_img()
_UpperCamelCase : Optional[Any] = image_processor(images=__A ,return_tensors='tf' )
# forward pass
_UpperCamelCase : Union[str, Any] = model(**__A )
# verify the logits
_UpperCamelCase : Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,__A )
        _UpperCamelCase : List[Any] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,__A ,atol=1E-4 ) )
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowercase__ ( unittest.TestCase ):
    def test_conversion(self ):
        '''simple docstring'''
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        inputs = tokenizer('This is me' ,return_tensors='pt' )
        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inputs )
        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inputs )
            self.assertTrue(torch.allclose(output ,output_from_pretrained ) )
    def test_save_without_reverse_raises(self ):
        '''simple docstring'''
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname )
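# Illustrative sketch (not part of the original tests): the same round-trip as a
# standalone helper. The tiny checkpoint name is reused from the tests above; any
# seq2seq checkpoint supported by BetterTransformer should behave the same way.
def _bettertransformer_roundtrip_demo():
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
    model = AutoModelForSeq2SeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
    inputs = tokenizer('This is me' ,return_tensors='pt' )
    model = model.to_bettertransformer()  # swap supported layers for fused attention kernels
    generated = model.generate(**inputs )
    model = model.reverse_bettertransformer()  # restore vanilla layers before save_pretrained
    return generated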
| 236
| 0
|
'''simple docstring'''
def is_palindrome(head ) -> bool:
    if not head:
        return True
    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # terminate the first half (optional; the comparison loop still works without it)
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head ) -> bool:
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head ) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
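# Minimal usage sketch (not in the original file). The tiny ListNode class and the
# _build_list helper below are assumptions added purely for demonstration.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None
def _build_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v )
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head
if __name__ == "__main__":
    # is_palindrome splits and reverses the list in place, so build a fresh list per call
    print(is_palindrome(_build_list([1, 2, 2, 1] ) ) )  # True
    print(is_palindrome_stack(_build_list([1, 2, 2, 1] ) ) )  # True
    print(is_palindrome_dict(_build_list([1, 2, 3] ) ) )  # False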
| 67
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
        """simple docstring"""
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''image'''] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        """simple docstring"""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 67
| 1
|
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class SageMakerTrainer(Trainer ):
    def __init__( self , args=None , **kwargs ):
        """simple docstring"""
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead." , FutureWarning , )
        super().__init__(args=args , **kwargs )
| 357
|
from __future__ import annotations
def carrier_concentration( electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
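# Quick usage sketch (not in the original file): exactly one of the three arguments
# must be zero; the function returns that argument's name and the value implied by
# the mass-action law n * p = n_i**2.
if __name__ == "__main__":
    print(carrier_concentration(electron_conc=25 , hole_conc=100 , intrinsic_conc=0 ) )
    # prints: ('intrinsic_conc', 50.0)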
| 102
| 0
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
_SCREAMING_SNAKE_CASE = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_SCREAMING_SNAKE_CASE = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_SCREAMING_SNAKE_CASE = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric ):
    def _info( self ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
                """https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
                """https://en.wikipedia.org/wiki/METEOR""",
            ] , )
    def _download_and_prepare( self , dl_manager ):
        """simple docstring"""
        import nltk
        nltk.download("""wordnet""" )
        if NLTK_VERSION >= version.Version("""3.6.5""" ):
            nltk.download("""punkt""" )
        if NLTK_VERSION >= version.Version("""3.6.6""" ):
            nltk.download("""omw-1.4""" )
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        """simple docstring"""
        if NLTK_VERSION >= version.Version("""3.6.5""" ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 343
|
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ):
    """simple docstring"""
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
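# Example invocation (illustrative; the script file name and paths are placeholders,
# not real files):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json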
| 238
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/rembert': 256,
}
SPIECE_UNDERLINE = '▁'
class RemBertTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 270
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
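# Worked check (illustrative, not in the original file): with the defaults above,
# embed_dim=64 and depths=[3, 4, 6, 5] give four stages, so the channel dimension
# after the last stage is hidden_size = int(64 * 2 ** (4 - 1)) = 512.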
| 270
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 100 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s: Optional[float] = None , return_dict: bool = True , ):
        """simple docstring"""
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                ''' process.''' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
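# Usage sketch (illustrative, not part of this file); the checkpoint name is an
# assumption — any checkpoint saved for this audio pipeline class should work.
def _dance_diffusion_demo():
    from diffusers import DiffusionPipeline
    pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
    output = pipe(batch_size=1 , num_inference_steps=50 , audio_length_in_s=4.0 )
    return output.audios  # numpy array of shape (batch, channels, samples)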
| 303
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors , steps ):
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step(vectors ):
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate(vector , angle_in_degrees ):
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees )
    c, s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot(vectors ):
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('''equal''' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 303
| 1
|
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
UpperCAmelCase: Optional[Any] = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
UpperCAmelCase: Union[str, Any] = BASE_URL + """/user"""
# https://github.com/settings/tokens
UpperCAmelCase: List[str] = os.environ.get("""USER_TOKEN""", """""")
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : List[Any] = {
"""Authorization""": F"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(__UpperCAmelCase , headers=__UpperCAmelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 336
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
_lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 336
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig ):
    model_type = "dpt"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                backbone_config = {
                    """global_padding""": """same""",
                    """layer_type""": """bottleneck""",
                    """depths""": [3, 4, 9],
                    """out_features""": ["""stage1""", """stage2""", """stage3"""],
                    """embedding_dynamic_padding""": True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("""readout_type must be one of ['ignore', 'add', 'project']""" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 26
|
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2_0_4_8,
"AI-Sweden/gpt-sw3-350m": 2_0_4_8,
"AI-Sweden/gpt-sw3-1.6b": 2_0_4_8,
"AI-Sweden/gpt-sw3-6.7b": 2_0_4_8,
"AI-Sweden/gpt-sw3-20b": 2_0_4_8,
}
class GPTSw3Tokenizer(PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file, do_lower_case=False, remove_space=False, keep_accents=False, pad_token=None, unk_token=None, eos_token=None, bos_token=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, )-> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;'''
                ''' if you are testing the model, this can safely be ignored''' )
            name_or_path = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '''<|endoftext|>''' if eos_token is None else eos_token
        unk_token = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '''<pad>''' if pad_token is None else pad_token
            bos_token = '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F'''[{"".join(map(chr, list(range(0, 9 ) ) + list(range(11, 32 ) ) + list(range(127, 160 ) ) + [160, 173, 8203] ) )}]''' )
def __getstate__( self : Any )-> Optional[Any]:
lowerCamelCase__ : List[str] =self.__dict__.copy()
lowerCamelCase__ : List[Any] =None
return state
def __setstate__( self : Optional[Any], lowerCamelCase : Dict )-> int:
lowerCamelCase__ : List[str] =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase__ : Union[str, Any] ={}
lowerCamelCase__ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self )-> int:
        return len(self.sp_model )
    def preprocess_text( self, text: str )-> str:
        text = self.non_printing_characters_re.sub('''''', text )
        # Normalize whitespaces
        text = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize('''NFC''', text )
        return text
def snake_case ( self : int, lowerCamelCase : str, **lowerCamelCase : str )-> List[str]:
lowerCamelCase__ : int =self.preprocess_text(lowerCamelCase )
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase )
def snake_case ( self : Any, lowerCamelCase : str )-> int:
return self.sp_model.PieceToId(lowerCamelCase )
def snake_case ( self : Optional[int], lowerCamelCase : int )-> str:
return self.sp_model.IdToPiece(lowerCamelCase )
@staticmethod
def snake_case ( lowerCamelCase : str )-> str:
return out_string
def snake_case ( self : List[Any], lowerCamelCase : List[str] )-> str:
lowerCamelCase__ : Dict =[]
lowerCamelCase__ : int =''''''
lowerCamelCase__ : Any =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase ) + token
lowerCamelCase__ : Tuple =True
lowerCamelCase__ : Tuple =[]
else:
current_sub_tokens.append(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =False
out_string += self.sp_model.decode(lowerCamelCase )
return out_string
def snake_case ( self : Union[str, Any] )-> Dict[str, int]:
lowerCamelCase__ : Tuple ={self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self : Optional[Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : List[str] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowerCamelCase__ : Dict =self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def snake_case ( self : int, lowerCamelCase : Union[str, List[str]], lowerCamelCase : Union[str, bool] = False )-> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : str =self.preprocess_text(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =self.sp_model.encode(lowerCamelCase )
else:
lowerCamelCase__ : int =[self.preprocess_text(lowerCamelCase ) for t in text]
lowerCamelCase__ : Tuple =self.sp_model.encode(lowerCamelCase )
if return_tensors is True or return_tensors == "pt":
lowerCamelCase__ : Union[str, Any] =torch.tensor(lowerCamelCase )
return token_ids
    def decode_fast( self, token_ids: Union[int, List[int]] )-> str:
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids( self, conversation: "Conversation" )-> List[int]:
        all_responses = [F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
        prompt = (
            F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(all_responses ) + F'''{self.bos_token}Bot:'''
        )
        return self.encode(text=prompt )
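# Illustrative note (not part of the original file): for a two-turn conversation,
# the prompt built by the method above has the shape
#   "<|endoftext|><s>User: Hej!<s>Bot: Hej hej!<s>Bot:"
# i.e. turns are joined by the BOS token and the string ends with "Bot:" so that
# generation continues with the bot's reply.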
| 238
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "" ) -> str:
    """simple docstring"""
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
        tensor = torch.rand(12 , dtype=torch.float32 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )
def _snake_case ( self ):
"""simple docstring"""
        tensor = torch.rand(12 , dtype=torch.float32 ) - 0.5
        path = get_new_path(suffix='''.wav''' )
        sf.write(path , tensor , 1_60_00 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = torch.randint(0 , 2_56 , (64, 64, 3) )
lowercase_ : Optional[Any] = AgentImage(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__SCREAMING_SNAKE_CASE ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
lowercase_ : Optional[Any] = Image.open(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = AgentImage(__SCREAMING_SNAKE_CASE )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__SCREAMING_SNAKE_CASE ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
lowercase_ : Union[str, Any] = Image.open(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = AgentImage(__SCREAMING_SNAKE_CASE )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__SCREAMING_SNAKE_CASE ) )
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = '''Hey!'''
lowercase_ : Dict = AgentText(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , agent_type.to_string() )
self.assertEqual(__SCREAMING_SNAKE_CASE , agent_type.to_raw() )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 264
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig ):
    model_type = '''gpt_neox'''
    def __init__( self , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
    def _rope_scaling_validation( self ):
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
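# Quick sketch (not in the original file): a rope_scaling value that passes the
# validation above must be a two-key dict such as
#   {"type": "linear", "factor": 2.0}
# where "type" is "linear" or "dynamic" and "factor" is a float greater than 1.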
| 264
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 103
|
'''simple docstring'''
def solution():
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
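# Cross-check sketch (not in the original file) using only the standard library:
# count the month-starts between 1901 and 2000 that fall on a Sunday directly.
def _solution_crosscheck():
    import calendar
    return sum(
        1
        for year in range(1901 , 2001 )
        for month in range(1 , 13 )
        if calendar.weekday(year , month , 1 ) == 6  # Monday == 0, Sunday == 6
    )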
| 164
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke test: load the public checkpoint and run a single dummy token through it.
        model = FlaxBertModel.from_pretrained('bert-base-cased')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 361
|
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return the peak of a list that strictly increases and then strictly decreases.

    >>> peak([1, 3, 5, 7, 6, 4, 2])
    7
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
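    # Usage sketch (added example; the input must strictly rise then fall):
    print(peak([1, 3, 5, 7, 6, 4, 2]))  # -> 7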
| 249
| 0
|
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute the gamma function for integer and half-integer arguments."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    """simple docstring"""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
        print(f"""gamma({num}) = {gamma(num)}""")
        print('\nEnter 0 to exit...')
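# Worked example (added sketch): unrolling gamma(n) = (n - 1) * gamma(n - 1) down to
# gamma(0.5) = sqrt(pi) gives gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.3234.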
| 80
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
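# Usage sketch (added example; the model id is assumed to exist on the Hub):
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained('squeezebert/squeezebert-uncased', use_fast=True)
# tok('Hello world')['input_ids']  # ids for [CLS] ... [SEP] around the tokens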
| 225
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='Translation', init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        from .features import Value

        return {k: Value('string') for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='TranslationVariableLanguages', init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({'language': pa.list_(pa.string()), 'translation': pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"""Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)}).""")
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
    def flatten(self):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
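# Usage sketch (added example using the classes above; field values are illustrative):
# from datasets import Dataset, Features
# feats = Features({'translation': Translation(languages=['en', 'fr'])})
# ds = Dataset.from_dict({'translation': [{'en': 'the cat', 'fr': 'le chat'}]}, features=feats)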
| 107
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"""{name}_pubkey.txt""") or os.path.exists(f"""{name}_privkey.txt"""):
        print('\nWARNING:')
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            'Use a different name or delete these files and re-run this program.')
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(f"""{name}_pubkey.txt""", 'w') as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""")
    print(f"""Writing private key to file {name}_privkey.txt...""")
    with open(f"""{name}_privkey.txt""", 'w') as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""")
def main() -> None:
    print('Making key files...')
    make_key_files('elgamal', 2048)
    print('Key files generation successful')
if __name__ == "__main__":
main()
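# Scheme sketch (added note): with public key (key_size, g, g**d % p, p) and private key
# (key_size, d), ElGamal encrypts m as (g**k % p, m * (g**d)**k % p) for a random k, and
# decryption divides by (g**k)**d % p; this script only generates and writes the key files.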
| 107
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
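# Usage sketch (added example; the path is a placeholder): this builder is normally
# reached through `load_dataset`, e.g.
# from datasets import load_dataset
# ds = load_dataset('pandas', data_files='data/frame.pkl', split='train')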
| 236
| 0
|
import requests
giphy_api_key = '''YOUR API KEY'''


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    '''Get a list of URLs of GIFs based on a given query.'''
    formatted_query = """+""".join(query.split())
    url = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url).json()["""data"""]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
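# Usage sketch (added note): with a valid key, get_gifs('space ship') returns a list of GIF URLs.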
| 359
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    """simple docstring"""

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"})
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"})
    inference: bool = field(
        default=True, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."})
    cuda: bool = field(
        default=True, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."})
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."})
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."})
    memory: bool = field(
        default=True, metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        })
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True, metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        })
    inference_time_csv_file: str = field(
        default=f'inference_time_{round(time())}.csv', metadata={"help": "CSV filename used if saving time results to csv."})
    inference_memory_csv_file: str = field(
        default=f'inference_memory_{round(time())}.csv', metadata={"help": "CSV filename used if saving memory results to csv."})
    train_time_csv_file: str = field(
        default=f'train_time_{round(time())}.csv', metadata={"help": "CSV filename used if saving time results to csv for training."})
    train_memory_csv_file: str = field(
        default=f'train_memory_{round(time())}.csv', metadata={"help": "CSV filename used if saving memory results to csv for training."})
    env_info_csv_file: str = field(
        default=f'env_info_{round(time())}.csv', metadata={"help": "CSV filename used if saving environment information."})
    log_filename: str = field(
        default=f'log_{round(time())}.csv', metadata={"help": "Log filename used if print statements are saved in log."})
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False, metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        })
    def __post_init__(self):
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """ to benchmark Transformer models.""",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                """Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
                """ bert-base-cased` or `args.models = ['bert-base-cased'].""")
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("""Multiprocessing is currently not possible on TPU.""")
            return False
        else:
            return True
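# Parsing sketch (added example; flag names mirror the field names above):
# from transformers import HfArgumentParser
# (benchmark_args,) = HfArgumentParser(BenchmarkArguments).parse_args_into_dataclasses()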
| 277
| 0
|
'''simple docstring'''
import os
def SCREAMING_SNAKE_CASE__ ( __A = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(__A ) , __A ) ) as input_file:
_snake_case = [
[int(__A ) for element in line.split(',' )]
for line in input_file.readlines()
]
_snake_case = len(__A )
_snake_case = len(matrix[0] )
_snake_case = [[-1 for _ in range(__A )] for _ in range(__A )]
for i in range(__A ):
_snake_case = matrix[i][0]
for j in range(1 , __A ):
for i in range(__A ):
_snake_case = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __A ):
_snake_case = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_snake_case = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'''{solution() = }''')
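# Complexity note (added): each of the `cols` passes makes three O(rows) sweeps
# (move right, relax downward, relax upward), so the dynamic programme runs in
# O(rows * cols) time with O(rows * cols) extra space.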
| 42
|
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        """simple docstring"""
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer='zeros', trainable=True, name='cluster_weight')
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer='zeros', trainable=True, name='cluster_bias')

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer='zeros', trainable=True, name=f'out_projs_._{i}')
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._weight')
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._bias')
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer='zeros', trainable=True, name=f'out_projs_._{i}')
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._weight')
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._bias')
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        """simple docstring"""
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe', y, proj)
        return tf.einsum('ibd,nd->ibn', y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        """simple docstring"""
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        """simple docstring"""
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation='mean' if return_mean else '')
        return out
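# Shape sketch (added note; sizes are illustrative): with `hidden` of shape
# [seq_len, batch, d_proj] and integer `target` of shape [seq_len, batch], `call`
# returns log-probabilities of shape [seq_len, batch, vocab_size]; when a target is
# given, the negative log-likelihood is attached to the layer via `add_loss`.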
| 42
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''unc-nlp/lxmert-base-uncased''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs) -> None:
        '''simple docstring'''
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 369
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    task_type: Optional[str] = field(
        default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
    labels: Optional[str] = field(
        default=None, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks')
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
            f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}')
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast)
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir)
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev)
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info(' %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
            results.update(result)
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test)
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, 'test_results.txt')
        if trainer.is_world_process_zero():
            with open(output_test_results_file, 'w') as writer:
                for key, value in metrics.items():
                    logger.info(' %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                with open(os.path.join(data_args.data_dir, 'test.txt'), 'r') as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
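# Invocation sketch (added example; paths and model id are placeholders):
# python run_ner.py --model_name_or_path bert-base-cased --data_dir ./data \
#   --labels ./data/labels.txt --output_dir ./out --do_train --do_eval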
| 225
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    """simple docstring"""

    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"""{split}.{field}"""), 'w') as f:
                    f.write(content)
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = "pytorch" ):
"""simple docstring"""
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'output' )
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'data' )
self._create_dummy_data(data_dir=UpperCAmelCase )
_UpperCAmelCase = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 39
| 0
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    '''simple docstring'''
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes.")
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
__magic_name__ : Any = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(_a ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path), no_input=True, extra_context=testing_configuration)

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
__magic_name__ : Any = json.load(_a )
__magic_name__ : Tuple = configuration["lowercase_modelname"]
__magic_name__ : Optional[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'''{directory}/configuration.json''' )
__magic_name__ : List[Any] = "PyTorch" in generate_tensorflow_pytorch_and_flax
__magic_name__ : int = "TensorFlow" in generate_tensorflow_pytorch_and_flax
__magic_name__ : List[Any] = "Flax" in generate_tensorflow_pytorch_and_flax
__magic_name__ : Tuple = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(_a , exist_ok=_a )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=_a )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f'''Line {line_to_copy_below} was not found in file.''')

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split("\"")[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split("\"")[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''')
        os.rmdir(directory)
| 41
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=267_735, cutoffs=[20_000, 40_000, 200_000], d_model=1_024, d_embed=1_024, n_head=16, d_head=64, d_inner=4_096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1_600, clamp_len=1_000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
@property
def SCREAMING_SNAKE_CASE ( self ):
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def SCREAMING_SNAKE_CASE ( self , _a ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
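# Editor's sketch (not in the original file): minimal usage of the config
# class above; unspecified arguments keep the defaults from __init__.
#
#   config = TransfoXLConfig(d_model=512, n_layer=6)
#   assert config.tie_projs == [False, True, True, True]  # 3 default cutoffs, proj_share_all_but_first=True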
| 41
| 1
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Test OpenAIGPTTokenizer with Spacy/ftfy."""

    pass
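# Editor's note (illustrative, mirrors the asserts in test_full_tokenizer):
# with the toy vocab/merges above, BPE splits "lower" into characters and then
# applies the merges "l o" -> "lo", "lo w" -> "low", "e r</w>" -> "er</w>":
#
#   tok = OpenAIGPTTokenizer(vocab_file, merges_file)  # hypothetical paths
#   tok.tokenize("lower")  # -> ["low", "er</w>"]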
| 229
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds where `function` becomes 0 in [a, b] using the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
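# Editor's sketch (not in the original file): bisection works for any
# continuous function with a sign change over [a, b].
def g(x: float) -> float:
    return x**2 - 4


if __name__ == "__main__":
    print(bisection(g, 1, 3))  # prints 2.0: g(1) < 0 < g(3), and the first midpoint is a root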
| 146
| 0
|
import warnings
from ..trainer import Trainer
from ..utils import logging
__snake_case = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 219
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
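# Editor's note (illustrative numbers, not from the original file): with 2
# processes and 9 eval samples, the sampler pads the last batch, so
# `accelerator.gather` returns 10 predictions in total; the slicing above
# drops the duplicated sample on the final step so the metric sees exactly
# len(eval_dataloader.dataset) examples.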
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.", )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
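# Editor's sketch of the resulting checkpoint layout and a resume invocation
# (paths and script name are hypothetical):
#
#   output_dir/epoch_0/          <- accelerator.save_state() folders
#   output_dir/epoch_1/
#   output_dir/state_0.json      <- accuracy / lr / optimizer_lr / epoch / step
#   output_dir/state_1.json
#
#   python checkpointing.py --output_dir output_dir \
#       --resume_from_checkpoint output_dir/epoch_1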
| 219
| 1
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"""{k}: {v["duration"]}""")
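# Editor's illustration (made-up values): each per-job entry produced by
# extract_time_from_single_job has the shape
#   {"started_at": "2023-05-01T10:00:00Z", "completed_at": "2023-05-01T10:42:00Z", "duration": 42}
# and the sort above orders jobs by "duration", longest first.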
| 212
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 66
| 0
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalizes the target text."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
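# Editor's sketch (example string is made up): punctuation from the ignore
# regex is stripped and newline/space runs collapse to single spaces.
#
#   normalize_text("Hello, World!\n\nIt's  fine.")  # -> "hello world it's fine"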
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `\'en\'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `\'test\'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
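# Editor's sketch of an invocation (model and dataset ids are examples only):
#
#   python eval.py \
#       --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en --split test --log_outputs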
| 352
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.")
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
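# Editor's sketch (illustrative): the factory above is used as a decorator and
# picks eager or (optionally XLA-compiled) graph execution; `model` and
# `input_ids` are assumed to exist in the enclosing scope.
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)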
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used.")
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line.")
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`")
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU.")
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU.")
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow.")
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 289
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
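# Editor's note (format per the methods above): a BigBird pair of sequences is
# laid out as  [CLS] A [SEP] B [SEP], with token_type_ids 0 over "[CLS] A [SEP]"
# and 1 over "B [SEP]"; e.g.
#   build_inputs_with_special_tokens([5, 6], [7])
#   # -> [cls_token_id, 5, 6, sep_token_id, 7, sep_token_id]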
| 306
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class Speech2TextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
@slow
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class Speech2TextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
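
# Aside, a minimal sketch of the prefix-token mechanism the last two tests
# exercise. The ids below are hypothetical; real ones come from the
# checkpoint's vocabulary.
LANG_CODE_TO_ID = {"fr": 5, "es": 10}


def build_multilingual_input_ids(token_ids, tgt_lang, eos_token_id=2):
    # target-language code first, EOS last, mirroring prefix_tokens + EOS suffix
    return [LANG_CODE_TO_ID[tgt_lang]] + token_ids + [eos_token_id]


assert build_multilingual_input_ids([4, 16_01], "es")[0] == LANG_CODE_TO_ID["es"]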
| 219
| 0
|
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal; greedy best-first ranks nodes by this alone
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open nodes are sorted using __lt__, i.e. by heuristic only
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_x, self.target.pos_y, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

    for elem in grid:
        print(elem)
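
# Aside, a minimal sketch (not part of the algorithm above): greedy best-first
# sorts the frontier by the heuristic alone, while A* sorts by path cost plus
# heuristic; swapping the priority below is the only difference between the two.
def greedy_priority(node: Node) -> float:
    return node.f_cost  # h only: fast, but the path found may not be shortest


def a_star_priority(node: Node) -> float:
    return node.g_cost + node.f_cost  # g + h: shortest path when h is admissible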
| 368
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the matching Flax names and reshape tensors where needed."""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
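
# Illustration (numpy stands in for real checkpoint weights): the transpose in
# rename_key_and_reshape_tensor converts PyTorch's OIHW conv-kernel layout
# (out_channels, in_channels, kH, kW) into the HWIO layout Flax expects.
import numpy as np

pt_kernel = np.zeros((8, 3, 5, 5))  # OIHW, as stored in a PyTorch state dict
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # HWIO, as Flax expects
assert flax_kernel.shape == (5, 5, 3, 8)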
| 224
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
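
# Example invocation (all paths and the script filename are placeholders):
#
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-converted \
#       --dict_path /path/to/dict.ltr.txt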
| 66
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/')

    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()

    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
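
# Example invocation (model identifiers are placeholders):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated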
| 208
| 0
|
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.')
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
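
# Aside, a minimal sketch of the adaptive-softmax idea used above: the cutoffs
# split the vocabulary into a frequent "head" (cluster 0, scored in a single
# softmax together with per-cluster logits) and progressively rarer tail
# clusters. The cutoff values here are illustrative only.
cutoffs_example = [2000, 10000, 50000]


def cluster_of(token_id, cutoffs):
    for i, cutoff in enumerate(cutoffs):
        if token_id < cutoff:
            return i
    return len(cutoffs)


assert cluster_of(5, cutoffs_example) == 0
assert cluster_of(30000, cutoffs_example) == 2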
| 65
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            '--accelerate-config_file', default=None, help='The accelerate config file to use for the default values in the launching script.',
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = 'not installed'
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('safetensors') is not None:
            import safetensors

            safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'

        accelerate_version = 'not installed'
        accelerate_config = accelerate_config_str = 'not found'
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                '\n'.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f'\t{accelerate_config}'
            )

        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = 'not installed'
        tf_cuda_available = 'NA'
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('GPU'))

        flax_version = 'not installed'
        jax_version = 'not installed'
        jaxlib_version = 'not installed'
        jax_backend = 'NA'
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            '`transformers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'Huggingface_hub version': huggingface_hub.__version__,
            'Safetensors version': f'{safetensors_version}',
            'Accelerate version': f'{accelerate_version}',
            'Accelerate config': f'{accelerate_config_str}',
            'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})',
            'Tensorflow version (GPU?)': f'{tf_version} ({tf_cuda_available})',
            'Flax version (CPU?/GPU?/TPU?)': f'{flax_version} ({jax_backend})',
            'Jax version': f'{jax_version}',
            'JaxLib version': f'{jaxlib_version}',
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))

        return info
@staticmethod
    def format_dict(d):
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()]) + "\n"
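
# Usage note: this command backs `transformers-cli env`, which prints the
# environment table assembled in run(). A rough programmatic equivalent
# (a sketch; the module path is an assumption):
#
#   from transformers.commands.env import EnvironmentCommand
#   EnvironmentCommand().run()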
| 65
| 1
|
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 227
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f'''F1: {f1:.2f}''')
    logger.info(f'''EM: {em:.2f}''')
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f'''Precision@{k}: {em: .2f}''')
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith("\""):
            title = title[1:]
        if title.endswith("\""):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
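
# Example invocation for end-to-end QA scoring (paths are placeholders):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path e2e_preds.txt \
#       --eval_mode e2e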
| 227
| 1
|
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, with a 0b prefix."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
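
    # Quick sanity check against Python's built-in bin(), which uses the same
    # "0b"/"-0b" prefixes as the function above.
    assert decimal_to_binary(35) == bin(35) == "0b100011"
    assert decimal_to_binary(-20) == bin(-20) == "-0b10100"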
| 124
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI (entailment) model."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f'''This example is {label}''' for label in labels], return_tensors="pt", padding="max_length", )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
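
# Usage sketch (the labels are hypothetical; any candidate label list works,
# since the tool wraps an MNLI-style entailment model):
#
#   classifier = TextClassificationTool()
#   classifier.setup()
#   result = classifier("This movie was fantastic!", labels=["positive", "negative"])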
| 124
| 1
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the Jaccard similarity coefficient between two sets (or sequences):
    the size of the intersection divided by the size of the union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
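
    # Worked example: the intersection is {"c", "d", "e"} (size 3) and the
    # union has 8 distinct elements, so the printed similarity is 3 / 8 = 0.375.
    assert jaccard_similarity(set_a, set_b) == 0.375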
| 253
|
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Computes the circular convolution of two discrete signals via a matrix of
    rotated copies of the second signal.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # row i of the matrix holds the second signal rotated right by i places
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
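
    # Cross-check: circular convolution is pointwise multiplication in the
    # Fourier domain, so the FFT route below reproduces circular_convolution()
    # for the two equal-length default signals.
    first = np.array([2, 1, 2, -1])
    second = np.array([1, 2, 3, 4])
    via_fft = np.real(np.fft.ifft(np.fft.fft(first) * np.fft.fft(second)))
    print([round(x, 2) for x in via_fft])  # [10.0, 10.0, 6.0, 14.0]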
| 198
| 0
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '''[PAD]'''
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '''[PAD]''')
        self.assertEqual(vocab_keys[1], '''[CLS]''')
        self.assertEqual(vocab_keys[-1], '''j''')
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [3_5389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
SCREAMING_SNAKE_CASE : str = {'''input_ids''': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 19
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deit'''] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deit'''] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
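# Added note (sketch): with the lazy module registered above, the public names
# resolve on first attribute access, e.g.:
#   from transformers import DeiTConfig, DeiTModel, DeiTImageProcessor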
| 19
| 1
|
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    '''simple docstring'''
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    '''simple docstring'''
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    '''simple docstring'''
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
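    # Added sanity check (sketch): all three implementations above should agree
    # with the expected output for the module-level test array.
    assert next_greatest_element_slow(arr) == expect
    assert next_greatest_element_fast(arr) == expect
    assert next_greatest_element(arr) == expect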
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 172
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Union[str, Any]= logging.get_logger(__name__)
_a : str= {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class UpperCamelCase ( PretrainedConfig ):
    model_type = """mgp-str"""
    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attns=False, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attns = output_a3_attns
        self.initializer_range = initializer_range
| 172
| 1
|
import os
SYMBOLS = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ''
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
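# Added round-trip check (sketch): "MCMXCIV" parses to 1994, and 1994 renders
# back to the same minimal form.
assert parse_roman_numerals("MCMXCIV") == 1994
assert generate_roman_numerals(1994) == "MCMXCIV"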
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 354
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        '''simple docstring'''
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
    def forward(self, x):
        '''simple docstring'''
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
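# Added sketch: the pooling trick above, shown standalone. An AdaptiveAvgPool2d
# with output size POOLING_BREAKDOWN[n] turns a ResNet feature map of shape
# (B, 2048, H, W) into exactly n spatial cells, i.e. n image "tokens" of width 2048.
_pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[4])  # (2, 2) grid -> 4 tokens
assert torch.flatten(_pool(torch.zeros(1, 2048, 7, 7)), start_dim=2).transpose(1, 2).shape == (1, 4, 2048)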
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        '''simple docstring'''
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self )-> Union[str, Any]:
'''simple docstring'''
return len(self.data )
    def __getitem__(self, index):
        '''simple docstring'''
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]['label']]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]['img'])).convert('RGB')
        image = self.transforms(image)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies(self):
        '''simple docstring'''
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'])
        return label_freqs
def collate_fn(batch):
    lens = [len(row['sentence']) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row['image'] for row in batch])
    tgt_tensor = torch.stack([row['label'] for row in batch])
    img_start_token = torch.stack([row['image_start_token'] for row in batch])
    img_end_token = torch.stack([row['image_end_token'] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
])
| 251
| 0
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    """simple docstring"""
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule="scaled_linear", beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
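# Added note (sketch): the saved directory can then be reloaded with the generic
# pipeline API and sampled from, e.g.
#   pipe = LDMPipeline.from_pretrained(output_path)
#   image = pipe(num_inference_steps=50).images[0]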
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 219
|
import doctest
from collections import deque
import numpy as np
class __snake_case :
    def __init__(self):
        """simple docstring"""
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self):
        """simple docstring"""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
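    # Added demo (sketch): for the signals set in __init__, the circular
    # convolution evaluates to [10.0, 10.0, 6.0, 14.0].
    print(__snake_case().circular_convolution())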
| 219
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
    """tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roc_bert"""] = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 367
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("""T""")
class SCREAMING_SNAKE_CASE_ ( Generic[T] ):
    def __init__(self, directed: bool = True):
        """simple docstring"""
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T):
        """simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self : Optional[int] ):
"""simple docstring"""
return pformat(self.adj_list )
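if __name__ == "__main__":
    # Added demo (sketch): add_edge returns self, so calls can be chained; the
    # class name is this dump's placeholder, and the vertex labels are arbitrary.
    graph = SCREAMING_SNAKE_CASE_(directed=True)
    graph.add_edge(0, 1).add_edge(0, 2).add_edge(1, 2)
    print(graph)  # {0: [1, 2], 1: [2], 2: []}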
| 165
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __A :
"""simple docstring"""
__lowerCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."} )
__lowerCAmelCase = field(
default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
__lowerCAmelCase = field(
default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."} )
__lowerCAmelCase = field(
default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."} )
__lowerCAmelCase = field(default=2, metadata={"help": "Batch size for training."} )
__lowerCAmelCase = field(default=2, metadata={"help": "Batch size for evaluation."} )
__lowerCAmelCase = field(default=0.1, metadata={"help": "Value of weight decay."} )
__lowerCAmelCase = field(
default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    __lowerCAmelCase = field(default=2e-4, metadata={"help": "Learning rate for training."} )
    __lowerCAmelCase = field(default="cosine", metadata={"help": "Learning rate scheduler type."} )
__lowerCAmelCase = field(
default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."} )
__lowerCAmelCase = field(
default=16, metadata={"help": "Number of gradient accumulation steps."} )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
__lowerCAmelCase = field(default=50000, metadata={"help": "Maximum number of training steps."} )
__lowerCAmelCase = field(
default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
__lowerCAmelCase = field(default=1024, metadata={"help": "Sequence lengths used for training."} )
__lowerCAmelCase = field(default=1, metadata={"help": "Training seed."} )
__lowerCAmelCase = field(
default=1024, metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."}, )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={"help": "States path if the training should continue from a checkpoint folder."} )
__lowerCAmelCase = field(default=_SCREAMING_SNAKE_CASE, metadata={"help": "If True the data is pretokenized."} )
@dataclass
class __A :
"""simple docstring"""
__lowerCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} )
__lowerCAmelCase = field(
default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."} )
__lowerCAmelCase = field(default=2, metadata={"help": "Batch size used for evaluation."} )
__lowerCAmelCase = field(
default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
__lowerCAmelCase = field(default=1024, metadata={"help": "Length of sequences to be evaluated."} )
__lowerCAmelCase = field(default=1, metadata={"help": "Random seed used for evaluation."} )
@dataclass
class __A :
"""simple docstring"""
__lowerCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} )
__lowerCAmelCase = field(default=_SCREAMING_SNAKE_CASE, metadata={"help": "Number of workers used for code evaluation."} )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."}, )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={"help": "Sample from the language model's output distribution."} )
__lowerCAmelCase = field(default=0.2, metadata={"help": "Sampling temperature used for generation."} )
__lowerCAmelCase = field(default=256, metadata={"help": "Maximum number of newly generated tokens."} )
__lowerCAmelCase = field(default=0, metadata={"help": "Top-k parameter used for generation."} )
__lowerCAmelCase = field(default=0.9_5, metadata={"help": "Top-p parameter used for nucleus sampling."} )
__lowerCAmelCase = field(default=10, metadata={"help": "Number of generations to run in parallel."} )
__lowerCAmelCase = field(
default=200, metadata={"help": "Number of completions to generate for each sample."} )
__lowerCAmelCase = field(default=1, metadata={"help": "Random seed used for evaluation."} )
    __lowerCAmelCase = field(
        default="eval_results.json", metadata={"help": "Name of the output file for the evaluation results."} )
__lowerCAmelCase = field(
default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
__lowerCAmelCase = field(
default=-1, metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
}, )
@dataclass
class __A :
"""simple docstring"""
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
}, )
__lowerCAmelCase = field(
default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."} )
__lowerCAmelCase = field(
default="codeparrot-clean", metadata={"help": "Folder to save processed processed dataset."} )
__lowerCAmelCase = field(
default=100000, metadata={"help": "Number of files to save per JSON output file."} )
__lowerCAmelCase = field(default="content", metadata={"help": "Column containing text data to process."} )
__lowerCAmelCase = field(
default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
__lowerCAmelCase = field(
default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
__lowerCAmelCase = field(
default=0.2_5, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
__lowerCAmelCase = field(
default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
__lowerCAmelCase = field(
default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."} )
__lowerCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}, )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={"help": "If True, near-duplicate samples are removed."} )
__lowerCAmelCase = field(
default=0.8_5, metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class __A :
"""simple docstring"""
__lowerCAmelCase = field(
default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."} )
__lowerCAmelCase = field(
default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."} )
__lowerCAmelCase = field(default="content", metadata={"help": "Column containing text data to process."} )
__lowerCAmelCase = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."} )
    __lowerCAmelCase = field(
        default=32768, metadata={"help": "Vocabulary size of the new tokenizer."} )
__lowerCAmelCase = field(default="codeparrot", metadata={"help": "Name of new tokenizer."} )
__lowerCAmelCase = field(default=_SCREAMING_SNAKE_CASE, metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class __A :
"""simple docstring"""
__lowerCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."} )
__lowerCAmelCase = field(
default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."} )
__lowerCAmelCase = field(
default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."} )
__lowerCAmelCase = field(default=_SCREAMING_SNAKE_CASE, metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class __A :
"""simple docstring"""
__lowerCAmelCase = field(
default="gpt2-large", metadata={"help": "Configuration to use for model initialization."} )
__lowerCAmelCase = field(
default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."} )
__lowerCAmelCase = field(default="codeparrot", metadata={"help": "Name of the created model."} )
__lowerCAmelCase = field(default=_SCREAMING_SNAKE_CASE, metadata={"help": "Push saved tokenizer to the hub."} )
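# Added note (sketch): each argument block above is meant to be consumed by
# transformers' HfArgumentParser. Because the dataclasses in this dump all share
# the placeholder name `__A`, only the last one is importable; with distinct
# names one would write, illustratively:
#   from transformers import HfArgumentParser
#   args = HfArgumentParser(TrainingArguments).parse_args_into_dataclasses()[0]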
| 81
|
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class a ( PretrainedConfig ):
    model_type = 'efficientnet'
    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
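# Added sketch (left commented to avoid import-time side effects): with the
# defaults above, which mirror google/efficientnet-b7, the derived
# num_hidden_layers is sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64.
#   cfg = a()  # `a` is this file's placeholder class name
#   assert cfg.num_hidden_layers == 64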
class a ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ):
return 1e-5
| 289
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    '''simple docstring'''
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name != 'FeatureExtractionPipelineTests':
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    expected_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    model_name = 'facebook/mbart-large-en-ro'
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
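# Added note (sketch): the integration test above is gated behind @slow; an
# illustrative invocation would be
#   RUN_SLOW=1 pytest tests/models/mbart/test_modeling_tf_mbart.py -k en_ro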
| 354
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _snake_case ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, crop_size: Dict[str, int] = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
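# Added sketch (illustrative usage; the class name is this dump's placeholder):
#   from PIL import Image
#   processor = _snake_case()
#   batch = processor.preprocess(Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape == (1, 3, 224, 224)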
| 59
| 0
|