| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86-54.5k chars) | int64 (0-371) | string (87-49.2k chars) | int64 (0-349) | int64 (0-1) |
from __future__ import annotations
import requests
valid_terms = set(
    "approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch `limit` posts from a subreddit, keeping only the `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 36
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation with Stable Diffusion that tries to keep images
    generated from the same seed similar across different output resolutions.
    """

    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        latents_reference: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 101
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 67
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """
    This is a general feature extraction class for speech recognition that pads sequence inputs.
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ):
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
| 67
| 1
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow

torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 12
|
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan Distance."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parents to parents until start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
| 145
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365
|
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that maps a placeholder token to several learned embedding tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 345
| 0
|
import random
def random_graph(nodes_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with `nodes_number` nodes and edge probability `probability`."""
    graph: dict = {i: [] for i in range(nodes_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(nodes_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than probability
    for i in range(nodes_number):
        for j in range(i + 1, nodes_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(nodes_number: int) -> dict:
    """Generate a complete graph with `nodes_number` nodes."""
    return {
        i: [j for j in range(nodes_number) if i != j] for i in range(nodes_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
| 298
| 1
|
from math import isqrt, log10


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number, using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Returns the number of hybrid-integers p^q * q^p less than or equal to base^degree."""
    upper_bound = degree * log10(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log10(prime_numbers[left])
            + prime_numbers[left] * log10(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 258
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to torch.float16 for faster downloads and less disk space."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
| 258
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the timm model's weights to our BiT structure.
    """
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 56
|
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    """
    Test cases for knapsack
    """

    def test_sorted(self):
        """
        kp.calc_profit takes the required arguments (profit, weight, max_weight)
        and returns whether the answer matches the expected one
        """
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """
        Returns ValueError for any negative max_weight value
        """
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """
        Returns ValueError for any negative weight value
        """
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """
        Returns ValueError for any negative profit value
        """
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """
        Returns ValueError for any zero max_weight value
        """
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """
        Returns IndexError if the lengths of profit and weight are unequal
        """
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
| 159
| 0
|
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate AND of the input values: output 1 only if both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Tests the and_gate function."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 358
|
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =AutoConfig.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =FlaxAutoModelForSeqaSeqLM.from_config(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =checkpoints.load_tax_checkpoint(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
SCREAMING_SNAKE_CASE__ : str ='''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE__ : Dict ='''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"
        # Self-Attention
        tax_attention_key = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
        tax_attention_out = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
        tax_attention_query = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
        tax_attention_value = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
        # Layer Normalization
        tax_attention_layer_norm = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index)]['layer']
        flax_model_encoder_layer_block['0'][encoder_attn_name]['k']['kernel'] = tax_attention_key
        flax_model_encoder_layer_block['0'][encoder_attn_name]['o']['kernel'] = tax_attention_out
        flax_model_encoder_layer_block['0'][encoder_attn_name]['q']['kernel'] = tax_attention_query
        flax_model_encoder_layer_block['0'][encoder_attn_name]['v']['kernel'] = tax_attention_value
        flax_model_encoder_layer_block['0']['layer_norm']['weight'] = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block['0'][encoder_attn_name]['global_input_layer_norm']['weight'] = tax_global_layer_norm
        if split_mlp_wi:
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi_0']['kernel'] = tax_mlp_wi_0
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi_1']['kernel'] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi']['kernel'] = tax_mlp_wi
        flax_model_encoder_layer_block['1']['DenseReluDense']['wo']['kernel'] = tax_mlp_wo
        flax_model_encoder_layer_block['1']['layer_norm']['weight'] = tax_mlp_layer_norm
        flax_model.params['encoder']['block'][str(layer_index)]['layer'] = flax_model_encoder_layer_block
    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['encoder']['block']['0']['layer']['0'][encoder_attn_name]['relative_attention_bias']['embedding'] = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
        flax_model.params['encoder']['block']['0']['layer']['0'][encoder_attn_name]['global_relative_attention_bias']['embedding'] = tax_encoder_global_rel_embedding
    # Assigning
    tax_encoder_norm = tax_model['target']['encoder']['encoder_norm']['scale']
    flax_model.params['encoder']['final_layer_norm']['weight'] = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"
        # Self-Attention
        tax_attention_key = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        tax_attention_out = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        tax_attention_query = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        tax_attention_value = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm']['scale']
        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        tax_enc_dec_attention_key = tax_enc_dec_attention_module['key']['kernel']
        tax_enc_dec_attention_out = tax_enc_dec_attention_module['out']['kernel']
        tax_enc_dec_attention_query = tax_enc_dec_attention_module['query']['kernel']
        tax_enc_dec_attention_value = tax_enc_dec_attention_module['value']['kernel']
        # Layer Normalization
        tax_cross_layer_norm = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
        flax_model_decoder_layer_block['0']['SelfAttention']['k']['kernel'] = tax_attention_key
        flax_model_decoder_layer_block['0']['SelfAttention']['o']['kernel'] = tax_attention_out
        flax_model_decoder_layer_block['0']['SelfAttention']['q']['kernel'] = tax_attention_query
        flax_model_decoder_layer_block['0']['SelfAttention']['v']['kernel'] = tax_attention_value
        flax_model_decoder_layer_block['0']['layer_norm']['weight'] = tax_pre_attention_layer_norm
        flax_model_decoder_layer_block['1']['EncDecAttention']['k']['kernel'] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block['1']['EncDecAttention']['o']['kernel'] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block['1']['EncDecAttention']['q']['kernel'] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block['1']['EncDecAttention']['v']['kernel'] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block['1']['layer_norm']['weight'] = tax_cross_layer_norm
        if split_mlp_wi:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_0']['kernel'] = tax_mlp_wi_0
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_1']['kernel'] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi']['kernel'] = tax_mlp_wi
        flax_model_decoder_layer_block['2']['DenseReluDense']['wo']['kernel'] = tax_mlp_wo
        flax_model_decoder_layer_block['2']['layer_norm']['weight'] = tax_mlp_layer_norm
        flax_model.params['decoder']['block'][str(layer_index)]['layer'] = flax_model_decoder_layer_block
    # Decoder Normalization
    tax_decoder_norm = tax_model['target']['decoder']['decoder_norm']['scale']
    flax_model.params['decoder']['final_layer_norm']['weight'] = tax_decoder_norm
    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['decoder']['block']['0']['layer']['0']['SelfAttention']['relative_attention_bias']['embedding'] = tax_decoder_rel_embedding
    # Token Embeddings
    tax_token_embeddings = tax_model['target']['token_embedder']['embedding']
    flax_model.params['shared']['embedding'] = tax_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params['lm_head']['kernel'] = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path)
    print('T5X Model was successfully converted!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
    parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    parser.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
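# Example invocation (hypothetical paths -- adjust to your own checkpoint layout):
#   python convert_longt5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoints/checkpoint_1000000 \
#       --config_name google/long-t5-tglobal-base \
#       --flax_dump_folder_path /tmp/flax_longt5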
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m', '--pretrained_model_name_or_path', type=str, default=None, required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models.',
    )
    parser.add_argument('-c', '--caption', type=str, default='robotic cat with wings', help='Text used to generate images.')
    parser.add_argument('-n', '--images_num', type=int, default=4, help='How many images to generate.')
    parser.add_argument('-s', '--seed', type=int, default=42, help='Seed for random process.')
    parser.add_argument('-ci', '--cuda_id', type=int, default=0, help='cuda_id.')
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt='robotic cat with wings', guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker: always return the generated images unflagged
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Restore the INT8 UNet produced by Intel Neural Compressor
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
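# Example invocation of this script (script name and model directory are illustrative;
# the directory is expected to hold a diffusers layout plus an optional best_model.pt):
#   python text2images_int8.py -m ./sd-v1-4-int8 -c "robotic cat with wings" -n 4 -s 42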
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=32_000, hidden_size=4_096, intermediate_size=11_008, num_hidden_layers=32,
                 num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2_048,
                 initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1,
                 eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
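# A minimal usage sketch (assuming the class above is importable as `LlamaConfig`):
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#   assert config.num_key_value_heads == config.num_attention_heads  # backward-compat default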
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
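    # Expected behavior sketch (each output line is padded to max_width):
    #   text_justification("This is an example of text justification.", 16)
    #   -> ['This    is    an', 'example  of text', 'justification.  ']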
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Optional[int] = tmp_path / "cache"
UpperCAmelCase_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : Union[str, Any] = ParquetDatasetReader(A__ ,cache_dir=A__ ,keep_in_memory=A__ ).read()
_check_parquet_dataset(A__ ,A__ )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" ,[str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" ,[
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] ,)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    func_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(func_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct_filename, fail_filename=None):
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
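# Expected format of --correct_filename (one record per line, semicolon-separated;
# values below are purely illustrative):
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([...])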
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(node: TreeNode | None) -> bool:
    """Check that every node is a well-formed TreeNode and the BST ordering invariant holds."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
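    # A minimal sketch of the check on a small valid BST:
    #   root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    #   assert is_binary_search_tree(root)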
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfig",
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
}
def check_config_docstrings_have_checkpoints() -> None:
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
"""`transformers-cli env` command: print environment info for bug reports."""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
        flax_version = jax_version = jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Directed graph stored as an adjacency list; edges are kept as [weight, vertex] pairs."""
def __init__( self ) -> Dict:
'''simple docstring'''
A__ = {}
def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Tuple:
'''simple docstring'''
if self.graph.get(lowercase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A__ = [[w, v]]
if not self.graph.get(lowercase ):
A__ = []
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return list(self.graph )
def UpperCamelCase ( self , lowercase , lowercase ) -> int:
'''simple docstring'''
if self.graph.get(lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any:
'''simple docstring'''
if s == d:
return []
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return visited
def UpperCamelCase ( self , lowercase=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
A__ = floor(random() * 10000 ) + 10
for i in range(lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1 )
def UpperCamelCase ( self , lowercase=-2 ) -> Any:
'''simple docstring'''
A__ = deque()
A__ = []
if s == -2:
A__ = list(self.graph )[0]
d.append(lowercase )
visited.append(lowercase )
while d:
A__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
return len(self.graph[u] )
def UpperCamelCase ( self , lowercase=-2 ) -> str:
'''simple docstring'''
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
A__ = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return sorted_nodes
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return list(lowercase )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return False
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any:
'''simple docstring'''
A__ = time()
self.dfs(lowercase , lowercase )
A__ = time()
return end - begin
def UpperCamelCase ( self , lowercase=-2 ) -> int:
'''simple docstring'''
A__ = time()
self.bfs(lowercase )
A__ = time()
return end - begin
class Graph:
    """Undirected graph stored as an adjacency list; edges are kept as [weight, vertex] pairs."""
def __init__( self ) -> int:
'''simple docstring'''
A__ = {}
def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Union[str, Any]:
'''simple docstring'''
if self.graph.get(lowercase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A__ = [[w, v]]
# add the other way
if self.graph.get(lowercase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
A__ = [[w, u]]
def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if self.graph.get(lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase )
# the other way round
if self.graph.get(lowercase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowercase )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> List[str]:
'''simple docstring'''
if s == d:
return []
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return visited
def UpperCamelCase ( self , lowercase=-1 ) -> str:
'''simple docstring'''
if c == -1:
A__ = floor(random() * 10000 ) + 10
for i in range(lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1 )
def UpperCamelCase ( self , lowercase=-2 ) -> Dict:
'''simple docstring'''
A__ = deque()
A__ = []
if s == -2:
A__ = list(self.graph )[0]
d.append(lowercase )
visited.append(lowercase )
while d:
A__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
return len(self.graph[u] )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return list(lowercase )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return False
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
return list(self.graph )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Optional[Any]:
'''simple docstring'''
A__ = time()
self.dfs(lowercase , lowercase )
A__ = time()
return end - begin
def UpperCamelCase ( self , lowercase=-2 ) -> List[Any]:
'''simple docstring'''
A__ = time()
self.bfs(lowercase )
A__ = time()
return end - begin
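# A minimal usage sketch, relying on the method names the classes call internally
# (add_pair / dfs); adjust if the method names differ in your copy:
#   g = DirectedGraph()
#   g.add_pair(0, 1)
#   g.add_pair(1, 2)
#   print(g.dfs(0, 2))  # expected: [0, 1, 2]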
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""VisionTextDualEncoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""FlaxVisionTextDualEncoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""TFVisionTextDualEncoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
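# Usage note on the lazy-import pattern above: with the module object replaced by
# _LazyModule, a statement such as
#   from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderModel
# only triggers the heavy torch/flax/tf import on first attribute access.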
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode('utf-8'))


def base85_decode(b85encoded: bytes) -> str:
    return base64.b85decode(b85encoded).decode('utf-8')


if __name__ == "__main__":
    test = """Hello World!"""
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
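    # Round-trip sanity check (base64.b85encode uses the RFC 1924 alphabet):
    #   assert base85_decode(base85_encode("Hello World!")) == "Hello World!"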
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
        
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
__UpperCamelCase : List[str] = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
__UpperCamelCase : List[str] = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that returns a Formatter instance given its type name (or alias) and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(t for t in _FORMAT_TYPES.keys() if t is not None)}, but got '{format_type}'")
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
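# Illustrative example (an addition, not part of the original script):
#
#   >>> import numpy as np
#   >>> maxpooling(np.arange(16).reshape(4, 4), size=2, stride=2)
#   array([[ 5.,  7.],
#          [13., 15.]])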
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
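# Hedged usage sketch (an addition; names assumed from the classes above): writing
# a dataset into a SQLite table through the writer, roughly:
#
#   import sqlite3
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   con = sqlite3.connect(":memory:")
#   written = SqlDatasetWriter(ds, "my_table", con, batch_size=1).write()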
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
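# Hedged usage sketch (an addition, not from the original module): decoding
# generated audio with its padding mask so padded samples are stripped:
#
#   audio_list = processor.batch_decode(audio=generated_values, padding_mask=padding_mask)
#   # -> list of (channels, valid_length) numpy arrays, one per batch item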
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
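# Illustrative example (an addition, not part of the original script):
#
#   >>> sort([5, 2, 9, 1])
#   [1, 2, 5, 9]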
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
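# Illustrative reference (an addition, not from the original test file): one common
# formulation of the rotary transform exercised above, in plain NumPy. Pairs
# (x1, x2) are rotated by position-dependent angles; `sin`/`cos` have shape
# (seq_len, dim // 2) and broadcast over leading axes. The exact pair layout used
# by TFRoFormerSelfAttention may differ; this is a sketch of the idea only.
def _reference_rotary(x, sin, cos):
    import numpy as np

    x1, x2 = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x)
    out[..., 0::2] = x1 * cos - x2 * sin
    out[..., 1::2] = x1 * sin + x2 * cos
    return out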
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
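# Hedged sketch (an addition, not from the test file): the push/reload round-trip
# these tests exercise boils down to:
#
#   extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
#   extractor.push_to_hub("my-feature-extractor", use_auth_token=token)
#   same = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/my-feature-extractor")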
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("""input_data""" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
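# Minimal illustration (an addition, not from the test file): map_nested applies a
# function through arbitrarily nested containers while preserving their structure:
#
#   >>> map_nested(lambda x: x + 1, {"a": [1, 2], "b": 3})
#   {'a': [2, 3], 'b': 4}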
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
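# Hedged sketch (an addition, not part of the original file): with _LazyModule,
# submodules are imported only on first attribute access, e.g.:
#
#   import transformers.models.convbert as convbert
#   convbert.ConvBertConfig      # triggers the real import of configuration_convbert
#   convbert.ConvBertTokenizer   # resolves tokenization_convbert lazily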
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            image_processor = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 164
| 0
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's C++ log verbosity before any TF import
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 99
|
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
__a ={'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        # `__a` is the large expected-encoding dict kept verbatim above
        self.tokenizer_integration_test_util(
            expected_encoding=__a, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences,
        )
| 218
| 0
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
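# Note: `prepare_led_inputs_dict` only fills in the masks a caller leaves as None — padding
# positions are masked out via `config.pad_token_id`, and head masks default to all-ones
# (i.e. no attention heads pruned).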
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 315
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 315
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the ``nth`` prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
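# Illustrative sanity checks implied by the 6k +/- 1 trial division above:
#   is_prime(29) -> True, is_prime(25) -> False, solution(6) -> 13 (the sixth prime).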
| 36
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ) -> None:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 221
| 0
|
'''simple docstring'''
import math
def prime_sieve(n: int) -> list[int]:
    """Return every prime below ``n`` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
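# Quick illustrative check of the sieve used above:
#   prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]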
| 350
|
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 91
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30_000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1,
        num_attention_heads=64, intermediate_size=16_384, inner_group_num=1, hidden_act="gelu_new",
        hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute",
        pad_token_id=0, bos_token_id=2, eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
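# Usage sketch (illustrative): inspect the ONNX input mapping for a default config.
#   onnx_config = AlbertOnnxConfig(AlbertConfig())
#   print(onnx_config.inputs)  # OrderedDict of input names -> dynamic axes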
| 78
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """Set the random seed."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
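# Note: seeding NumPy, the torch CPU generator and every CUDA device together is what keeps
# multi-GPU runs reproducible; seeding only one of them would still leave nondeterminism.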
| 78
| 1
|
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
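# Both functions are breadth-first traversals: `bfs_shortest_path_distance` runs in O(V + E),
# while `bfs_shortest_path` additionally copies the partial path at each expansion, paying
# extra time and memory for path reconstruction.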
| 325
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12,
        num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True,
        attention_type="divided_space_time", drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
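# Usage sketch (illustrative): instantiate a config with non-default video settings.
#   config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
#   assert config.num_frames == 16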
| 325
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Any = ["""VisionTextDualEncoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = ["""FlaxVisionTextDualEncoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = ["""TFVisionTextDualEncoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 14
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
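# Usage sketch (illustrative; BertConfig is just a stand-in for any sub-config):
#   from transformers import BertConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   config.decoder.is_decoder  # True, set by the classmethod above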
| 14
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowercase_ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
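# Example invocation (illustrative only: the script name, checkpoint file and
# output directory below are placeholders, not shipped artifacts):
#     python convert_sew_checkpoint.py \
#         --checkpoint_path ./sew_checkpoint.pt \
#         --pytorch_dump_folder_path ./sew-converted \
#         --is_finetuned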
| 194
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Load configuration defined in the metadata file
with open(SCREAMING_SNAKE_CASE__ ) as metadata_file:
__lowerCamelCase : List[str] = json.load(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : int = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
__lowerCamelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
# Load the entity vocab file
__lowerCamelCase : List[Any] = load_entity_vocab(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__lowerCamelCase : str = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Dict = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : str = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Initialize the embeddings of the special tokens
__lowerCamelCase : Union[str, Any] = state_dict['embeddings.word_embeddings.weight']
__lowerCamelCase : Tuple = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
__lowerCamelCase : Any = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
__lowerCamelCase : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__lowerCamelCase : Optional[int] = f'encoder.layer.{layer_index}.attention.self.'
__lowerCamelCase : Dict = state_dict[prefix + matrix_name]
__lowerCamelCase : List[Any] = state_dict[prefix + matrix_name]
__lowerCamelCase : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__lowerCamelCase : Optional[int] = state_dict['entity_embeddings.entity_embeddings.weight']
__lowerCamelCase : Union[str, Any] = entity_emb[entity_vocab['[MASK]']]
__lowerCamelCase : Optional[Any] = LukeModel(config=SCREAMING_SNAKE_CASE__ ).eval()
__lowerCamelCase , __lowerCamelCase : List[Any] = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
raise ValueError(
'Unexpected keys'
f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
__lowerCamelCase : Optional[Any] = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , task='entity_classification' )
__lowerCamelCase : Dict = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
__lowerCamelCase : Union[str, Any] = (39, 42)
__lowerCamelCase : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , add_prefix_space=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
__lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE__ )
# Verify word hidden states
if model_size == "large":
__lowerCamelCase : Dict = torch.Size((1, 42, 1_024) )
__lowerCamelCase : int = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
__lowerCamelCase : Union[str, Any] = torch.Size((1, 42, 768) )
__lowerCamelCase : Tuple = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
__lowerCamelCase : Union[str, Any] = torch.Size((1, 1, 1_024) )
__lowerCamelCase : Dict = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
__lowerCamelCase : int = torch.Size((1, 1, 768) )
__lowerCamelCase : Dict = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(SCREAMING_SNAKE_CASE__ ) )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
def load_entity_vocab ( entity_vocab_path ):
    entity_vocab = {}
    with open(entity_vocab_path , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split('\t' )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
lowercase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
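# Example invocation (illustrative only: every path below is a placeholder for
# an artifact you must supply yourself):
#     python convert_luke_checkpoint.py \
#         --checkpoint_path ./luke_base/pytorch_model.bin \
#         --metadata_path ./luke_base/metadata.json \
#         --entity_vocab_path ./luke_base/entity_vocab.tsv \
#         --pytorch_dump_folder_path ./luke-base-converted \
#         --model_size base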
| 194
| 1
|
'''simple docstring'''
def hexagonal_numbers ( length : int ):
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
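# Expected output, since the n-th entry is n * (2n - 1):
#     [0, 1, 6, 15, 28]
#     [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]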
| 83
|
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : int = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class lowercase__ ( lowercase ):
lowercase__ = """xlm-prophetnet"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[Union[str, Callable]] = "gelu" ,lowerCamelCase__ : Optional[int] = 30522 ,lowerCamelCase__ : Optional[int] = 1024 ,lowerCamelCase__ : Optional[int] = 4096 ,lowerCamelCase__ : Optional[int] = 12 ,lowerCamelCase__ : Optional[int] = 16 ,lowerCamelCase__ : Optional[int] = 4096 ,lowerCamelCase__ : Optional[int] = 12 ,lowerCamelCase__ : Optional[int] = 16 ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[int] = 512 ,lowerCamelCase__ : Optional[float] = 0.0_2 ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[int] = 0 ,lowerCamelCase__ : Optional[int] = 2 ,lowerCamelCase__ : Optional[int] = 32 ,lowerCamelCase__ : Optional[int] = 128 ,lowerCamelCase__ : Optional[bool] = False ,lowerCamelCase__ : Optional[float] = 0.0 ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[int] = 0 ,lowerCamelCase__ : Optional[int] = 1 ,lowerCamelCase__ : Optional[int] = 2 ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : str = encoder_ffn_dim
_UpperCamelCase : List[Any] = num_encoder_layers
_UpperCamelCase : Tuple = num_encoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : List[Any] = num_decoder_layers
_UpperCamelCase : List[Any] = num_decoder_attention_heads
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : str = init_std # Normal(0, this parameter)
_UpperCamelCase : List[str] = activation_function
# parameters for xlmprophetnet
_UpperCamelCase : Tuple = ngram
_UpperCamelCase : Optional[Any] = num_buckets
_UpperCamelCase : Tuple = relative_max_distance
_UpperCamelCase : str = disable_ngram_loss
_UpperCamelCase : str = eps
# 3 Types of Dropout
_UpperCamelCase : Union[str, Any] = attention_dropout
_UpperCamelCase : str = activation_dropout
_UpperCamelCase : List[str] = dropout
_UpperCamelCase : Tuple = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,is_encoder_decoder=lowerCamelCase__ ,add_cross_attention=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
    @property
    def num_hidden_layers ( self : Optional[int] ):
        '''simple docstring'''
        return self.num_encoder_layers + self.num_decoder_layers
    @num_hidden_layers.setter
    def num_hidden_layers ( self : str ,lowerCamelCase__ : Union[str, Any] ):
        '''simple docstring'''
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.' )
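# Note: `num_hidden_layers` above is a read-only property: with the default 12
# encoder and 12 decoder layers it returns 24, and assigning to it raises
# NotImplementedError via the setter.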
| 83
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''poolformer'''
def __init__( self : Optional[int] , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Union[str, Any]=4.0 , _UpperCAmelCase : Tuple=[2, 2, 6, 2] , _UpperCAmelCase : str=[64, 128, 320, 512] , _UpperCAmelCase : int=[7, 3, 3, 3] , _UpperCAmelCase : Optional[Any]=[4, 2, 2, 2] , _UpperCAmelCase : Tuple=[2, 1, 1, 1] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : Optional[int]=0.02 , **_UpperCAmelCase : Optional[int] , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = pool_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = depths
UpperCAmelCase_ = patch_sizes
UpperCAmelCase_ = strides
UpperCAmelCase_ = num_encoder_blocks
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_layer_scale
UpperCAmelCase_ = layer_scale_init_value
UpperCAmelCase_ = initializer_range
super().__init__(**_UpperCAmelCase )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self : Any ) -> float:
'''simple docstring'''
return 2e-3
| 354
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase = """src/diffusers"""
lowerCamelCase = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowerCamelCase = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCamelCase = spec.loader.load_module()
def _should_continue ( lowerCAmelCase__ , lowerCAmelCase__ ):
return line.startswith(lowerCAmelCase__ ) or len(lowerCAmelCase__ ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , lowerCAmelCase__ ) is not None
def find_code_in_diffusers ( lowerCAmelCase__ ):
UpperCAmelCase_ = object_name.split("." )
UpperCAmelCase_ = 0
# First let's find the module where our object lives.
UpperCAmelCase_ = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            UpperCAmelCase_ = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ = f.readlines()
# Now let's find the class / func in the code!
UpperCAmelCase_ = ""
UpperCAmelCase_ = 0
for name in parts[i + 1 :]:
while (
            line_index < len(lines ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
        if line_index >= len(lines ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCAmelCase_ = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCAmelCase_ = lines[start_index:line_index]
return "".join(lowerCAmelCase__ )
lowerCamelCase = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowerCamelCase = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowerCamelCase = re.compile(r"""<FILL\s+[^>]*>""")
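# An illustrative header that `_re_copy_warning` above is written to match; the
# optional trailing clause is what `_re_replace_pattern` later picks apart:
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock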
def get_indent ( lowerCAmelCase__ ):
UpperCAmelCase_ = code.split("\n" )
UpperCAmelCase_ = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def blackify ( lowerCAmelCase__ ):
UpperCAmelCase_ = len(get_indent(lowerCAmelCase__ ) ) > 0
if has_indent:
UpperCAmelCase_ = f"""class Bla:\n{code}"""
UpperCAmelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCAmelCase__ )
UpperCAmelCase_ = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = style_docstrings_in_code(lowerCAmelCase__ )
return result[len("class Bla:\n" ) :] if has_indent else result
def is_copy_consistent ( lowerCAmelCase__ , lowerCAmelCase__=False ):
with open(lowerCAmelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
UpperCAmelCase_ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = search.groups()
UpperCAmelCase_ = find_code_in_diffusers(lowerCAmelCase__ )
UpperCAmelCase_ = get_indent(lowerCAmelCase__ )
UpperCAmelCase_ = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCAmelCase_ = theoretical_indent
UpperCAmelCase_ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
UpperCAmelCase_ = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
break
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _should_continue(lowerCAmelCase__ , lowerCAmelCase__ ) and re.search(f"""^{indent}# End copy""" , lowerCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCAmelCase_ = lines[start_index:line_index]
UpperCAmelCase_ = "".join(lowerCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
        UpperCAmelCase_ = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(line ) is None]
UpperCAmelCase_ = "\n".join(lowerCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            UpperCAmelCase_ = replace_pattern.replace("with" , "" ).split("," )
            UpperCAmelCase_ = [_re_replace_pattern.search(p ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = pattern.groups()
UpperCAmelCase_ = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if option.strip() == "all-casing":
UpperCAmelCase_ = re.sub(obja.lower() , obja.lower() , lowerCAmelCase__ )
UpperCAmelCase_ = re.sub(obja.upper() , obja.upper() , lowerCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCAmelCase_ = blackify(lines[start_index - 1] + theoretical_code )
UpperCAmelCase_ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
UpperCAmelCase_ = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCAmelCase_ = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""" )
        with open(lowerCAmelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
            f.writelines(lines )
return diffs
def check_copies ( lowerCAmelCase__ = False ):
    UpperCAmelCase_ = glob.glob(os.path.join(DIFFUSERS_PATH , "**/*.py" ) , recursive=True )
UpperCAmelCase_ = []
for filename in all_files:
        UpperCAmelCase_ = is_copy_consistent(filename , lowerCAmelCase__ )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        UpperCAmelCase_ = "\n".join(diffs )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCamelCase = parser.parse_args()
check_copies(args.fix_and_overwrite)
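# Typical invocations, matching the error message above:
#     python utils/check_copies.py                      # only report inconsistencies
#     python utils/check_copies.py --fix_and_overwrite  # rewrite stale copies in place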
| 241
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power ( voltage : float , current : float , power : float ):
    '''simple docstring'''
    result = namedtuple("""result""" , """name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
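# Worked example: the function solves P = V * I for whichever argument is 0.
# With voltage=0, current=2 and power=4 it returns result(name='voltage', value=2.0),
# i.e. V = P / I = 4 / 2.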
| 46
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__ (A__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class snake_case__ :
"""simple docstring"""
def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=3 , __lowercase=3_2 , __lowercase=0.2_5 , __lowercase=8 , __lowercase=True , __lowercase=1_0_2_4 , __lowercase=3_2 , __lowercase="relu6" , __lowercase=0.1 , __lowercase=0.0_2 , __lowercase=True , __lowercase=True , __lowercase=1_0 , __lowercase=None , ) -> List[Any]:
"""simple docstring"""
a__ : Tuple = parent
a__ : Dict = batch_size
a__ : Optional[int] = num_channels
a__ : int = image_size
a__ : Union[str, Any] = depth_multiplier
a__ : int = min_depth
a__ : List[str] = tf_padding
a__ : Tuple = int(last_hidden_size * depth_multiplier )
a__ : Union[str, Any] = output_stride
a__ : List[Any] = hidden_act
a__ : int = classifier_dropout_prob
a__ : str = use_labels
a__ : Dict = is_training
a__ : Dict = num_labels
a__ : int = initializer_range
a__ : List[Any] = scope
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : str = None
a__ : List[str] = None
if self.use_labels:
a__ : Any = ids_tensor([self.batch_size] , self.num_labels )
a__ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ : str = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
"""simple docstring"""
a__ : Dict = MobileNetVaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
a__ : List[Any] = model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
"""simple docstring"""
a__ : int = self.num_labels
a__ : Dict = MobileNetVaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
a__ : List[str] = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : Dict = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ : Dict = config_and_inputs
a__ : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ (A__ , A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase :Any = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
__lowerCAmelCase :int = (
{"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase :List[Any] = False
__lowerCAmelCase :Optional[Any] = False
__lowerCAmelCase :Optional[Any] = False
__lowerCAmelCase :Dict = False
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : List[Any] = MobileNetVaModelTester(self )
a__ : Tuple = MobileNetVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : str = model_class(__lowercase )
a__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[Any] = [*signature.parameters.keys()]
a__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__lowercase , __lowercase , __lowercase ):
a__ : Dict = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
a__ : int = model(**self._prepare_for_class(__lowercase , __lowercase ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 2_6
            self.assertEqual(len(hidden_states ) , expected_num_stages )
a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Union[str, Any] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ : Union[str, Any] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Dict = MobileNetVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def prepare_img ( ) -> Tuple:
"""simple docstring"""
a__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
a__ : List[str] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(__lowercase )
a__ : Union[str, Any] = self.default_image_processor
a__ : Optional[Any] = prepare_img()
a__ : List[str] = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
a__ : Tuple = model(**__lowercase )
# verify the logits
a__ : Tuple = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , __lowercase )
a__ : Tuple = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
| 170
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Optional[int] = '''mra'''
def __init__( self , lowerCAmelCase_=5_02_65 , lowerCAmelCase_=7_68 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=30_72 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=1 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-5 , lowerCAmelCase_="absolute" , lowerCAmelCase_=4 , lowerCAmelCase_="full" , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , **lowerCAmelCase_ , ) -> List[str]:
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_A = vocab_size
_A = max_position_embeddings
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = type_vocab_size
_A = layer_norm_eps
_A = position_embedding_type
_A = block_per_row
_A = approx_mode
_A = initial_prior_first_n_blocks
_A = initial_prior_diagonal_n_blocks
| 81
|
import cva
import numpy as np
class HarrisCorner :
"""simple docstring"""
    def __init__( self , k , window_size ) -> Any:
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
def __str__( self ) -> str:
return str(self.k )
    def detect ( self , img_path ) -> tuple[cva.Mat, list[list[int]]]:
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the k validated in __init__ rather than a hard-coded 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
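# The corner response computed in `detect` is the standard Harris measure
#     R = det(M) - k * trace(M)^2,
# with det(M) = wxx * wyy - wxy^2 and trace(M) = wxx + wyy, where the w-terms
# are windowed sums of the image-gradient products.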
| 81
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _A( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = IFInpaintingPipeline
UpperCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
UpperCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase : str = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCAmelCase_ ( self ):
return self._get_dummy_components()
def UpperCAmelCase_ ( self , _A , _A=0 ):
if str(_A ).startswith('mps' ):
__A : Union[str, Any] = torch.manual_seed(_A )
else:
__A : int = torch.Generator(device=_A ).manual_seed(_A )
__A : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__A : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__A : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def UpperCAmelCase_ ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCAmelCase_ ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def UpperCAmelCase_ ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCAmelCase_ ( self ):
self._test_save_load_local()
def UpperCAmelCase_ ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 280
|
import cva
import numpy as np
class HarrisCorner :
"""simple docstring"""
    def __init__( self , k : float , window_size : int ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value" )
def __str__( self : Optional[Any] ):
return str(self.k )
    def detect ( self , img_path : str ):
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the k validated in __init__ rather than a hard-coded 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 312
| 0
|
"""simple docstring"""
import os
__snake_case = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def parse_roman_numerals ( numerals : str ):
    """simple docstring"""
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals ( num : int ):
    """simple docstring"""
    numerals = ''''''
    m_count = num // 10_00
    numerals += m_count * "M"
    num %= 10_00
    c_count = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution ( roman_numerals_filename : str = "/p089_roman.txt" ):
    """simple docstring"""
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        reduced = generate_roman_numerals(num )
        savings += len(original ) - len(reduced )
return savings
if __name__ == "__main__":
print(f'{solution() = }')
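# Worked example: parse_roman_numerals("XLIX") subtracts 10 (X before L) and
# 1 (I before X) while adding 50 and the final 10, giving 49, and
# generate_roman_numerals(49) rebuilds the minimal form "XLIX"; such a line
# therefore contributes 0 saved characters.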
| 153
|
"""simple docstring"""
def remove_duplicates ( key : str ):
    """simple docstring"""
    key_no_dups = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def create_cipher_map ( key : str ):
    """simple docstring"""
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ), 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher ( message : str, cipher_map : dict[str, str] ):
    """simple docstring"""
    return "".join(cipher_map.get(ch, ch ) for ch in message.upper() )
def decipher ( message : str, cipher_map : dict[str, str] ):
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch ) for ch in message.upper() )
def main ( ):
    """simple docstring"""
    message = input('''Enter message to encode or decode: ''' ).strip()
    key = input('''Enter keyword: ''' ).strip()
    option = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
    try:
        func = {'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
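# Worked example: with key "ZEBRA" the cipher map starts A->Z, B->E, C->B,
# D->R, E->A, and the remaining plaintext letters receive the still-unused
# cipher letters in alphabetical order (F->C, G->D, H->F, ...).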
| 153
| 1
|
"""simple docstring"""
_a = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
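# English rendering of the Korean snippet above: "# How to install Transformers"
# and "# To install from source instead of the last release, comment out the
# command above and uncomment the one below."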
_a = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 17
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class A__ :
def __init__( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = scheduler
        __lowercase = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
__lowercase = split_batches
__lowercase = step_with_optimizer
__lowercase = GradientState()
def a__ ( self : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__lowercase = AcceleratorState().num_processes
for _ in range(_UpperCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
else:
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self.scheduler.get_last_lr()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
return self.scheduler.state_dict()
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.scheduler.load_state_dict(_UpperCAmelCase )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return self.scheduler.get_lr()
def a__ ( self : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.scheduler.print_lr(*_UpperCAmelCase , **_UpperCAmelCase )
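# Minimal usage sketch (assumes `optimizer` and `lr_scheduler` are an already
# prepared torch optimizer/LR-scheduler pair; the wrapper class above takes
# them positionally):
#     wrapped = A__(lr_scheduler, optimizer)
#     wrapped.step()  # forwards to the real scheduler only once the optimizer has stepped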
| 325
| 0
|
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ) -> Tuple:
        if isinstance(a_ ,dict ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
_UpperCAmelCase : Any = deepcopy(a_ )
elif os.path.exists(a_ ):
with io.open(a_ ,"""r""" ,encoding="""utf-8""" ) as f:
_UpperCAmelCase : Any = json.load(a_ )
else:
try:
_UpperCAmelCase : Union[str, Any] = baseaa.urlsafe_baadecode(a_ ).decode("""utf-8""" )
_UpperCAmelCase : Optional[int] = json.loads(a_ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
_UpperCAmelCase : Any = config
self.set_stage_and_offload()
def _snake_case ( self ) -> Dict:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
_UpperCAmelCase : Any = self.get_value("""zero_optimization.stage""" ,-1 )
# offload
_UpperCAmelCase : List[Any] = False
if self.is_zeroa() or self.is_zeroa():
_UpperCAmelCase : Optional[int] = set(["""cpu""", """nvme"""] )
_UpperCAmelCase : Optional[int] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
_UpperCAmelCase : List[Any] = True
def _snake_case ( self ,a_ ) -> Tuple:
_UpperCAmelCase : List[str] = self.config
# find the config node of interest if it exists
_UpperCAmelCase : List[Any] = ds_key_long.split(""".""" )
_UpperCAmelCase : Dict = nodes.pop()
for node in nodes:
_UpperCAmelCase : Optional[int] = config.get(a_ )
if config is None:
return None, ds_key
return config, ds_key
def _snake_case ( self ,a_ ,a_=None ) -> Tuple:
_UpperCAmelCase ,_UpperCAmelCase : int = self.find_config_node(a_ )
if config is None:
return default
return config.get(a_ ,a_ )
def _snake_case ( self ,a_ ,a_=False ) -> int:
_UpperCAmelCase : Optional[int] = self.config
# find the config node of interest if it exists
_UpperCAmelCase : Tuple = ds_key_long.split(""".""" )
for node in nodes:
_UpperCAmelCase : Dict = config
_UpperCAmelCase : List[str] = config.get(a_ )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(a_ )
def _snake_case ( self ,a_ ) -> Any:
_UpperCAmelCase : str = self.get_value(a_ )
return False if value is None else bool(a_ )
def _snake_case ( self ,a_ ) -> str:
_UpperCAmelCase : Optional[int] = self.get_value(a_ )
return False if value is None else not bool(a_ )
def _snake_case ( self ) -> List[Any]:
return self._stage == 2
def _snake_case ( self ) -> Optional[Any]:
return self._stage == 3
def _snake_case ( self ) -> Dict:
return self._offload
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ) -> Optional[Any]:
_UpperCAmelCase : List[Any] = engine
def _snake_case ( self ,a_ ,**a_ ) -> Tuple:
# runs backpropagation and handles mixed precision
self.engine.backward(a_ ,**a_ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self ,a_ ) -> str:
super().__init__(a_ ,device_placement=a_ ,scaler=a_ )
_UpperCAmelCase : Dict = hasattr(self.optimizer ,"""overflow""" )
def _snake_case ( self ,a_=None ) -> Union[str, Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _snake_case ( self ) -> Optional[int]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _snake_case ( self ) -> Any:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self ,a_ ,a_ ) -> Union[str, Any]:
super().__init__(a_ ,a_ )
def _snake_case ( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_=0.001 ,a_=0 ,**a_ ) -> Union[str, Any]:
_UpperCAmelCase : Any = params
_UpperCAmelCase : Optional[int] = lr
_UpperCAmelCase : Optional[Any] = weight_decay
_UpperCAmelCase : Tuple = kwargs
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_=None ,a_=0 ,**a_ ) -> Dict:
_UpperCAmelCase : List[str] = optimizer
_UpperCAmelCase : str = total_num_steps
_UpperCAmelCase : Optional[Any] = warmup_num_steps
_UpperCAmelCase : int = kwargs
| 349
|
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" ,"""prajjwal1/bert-tiny""" )
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : List[Any] = bertabert.config.encoder.vocab_size
_UpperCAmelCase : Optional[int] = tokenizer.sep_token_id
_UpperCAmelCase : Union[str, Any] = tokenizer.cls_token_id
_UpperCAmelCase : str = 128
_UpperCAmelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""train[:1%]""" )
_UpperCAmelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""validation[:1%]""" )
_UpperCAmelCase : Any = train_dataset.select(range(32 ) )
_UpperCAmelCase : Any = val_dataset.select(range(16 ) )
_UpperCAmelCase : List[Any] = 4
def _map_to_encoder_decoder_inputs(a_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["""article"""] ,padding="""max_length""" ,truncation=True ,max_length=512 )
            outputs = tokenizer(batch["""highlights"""] ,padding="""max_length""" ,truncation=True ,max_length=128 )
_UpperCAmelCase : int = inputs.input_ids
_UpperCAmelCase : Union[str, Any] = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = outputs.input_ids
_UpperCAmelCase : Dict = outputs.input_ids.copy()
_UpperCAmelCase : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase : Optional[int] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids ,skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids ,skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
train_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
# same for validation dataset
_UpperCAmelCase : List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
val_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
_UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : List[str] = SeqaSeqTrainingArguments(
output_dir=a_ ,per_device_train_batch_size=a_ ,per_device_eval_batch_size=a_ ,predict_with_generate=a_ ,evaluation_strategy="""steps""" ,do_train=a_ ,do_eval=a_ ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,)
# instantiate trainer
_UpperCAmelCase : int = SeqaSeqTrainer(
model=a_ ,args=a_ ,compute_metrics=_compute_metrics ,train_dataset=a_ ,eval_dataset=a_ ,tokenizer=a_ ,)
# start training
trainer.train()
| 349
| 1
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__)
def list_field ( default=None , metadata=None ) -> Any:
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
    models: List[str] = list_field(
        default=[] , metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        } , )
    batch_sizes: List[int] = list_field(
        default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
    inference: bool = field(
        default=True , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
    cuda: bool = field(
        default=True , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
    tpu: bool = field(
        default=True , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
    fpaa: bool = field(default=False , metadata={"help": "Use FP16 to accelerate inference."} )
    training: bool = field(default=False , metadata={"help": "Benchmark training of model"} )
    verbose: bool = field(default=False , metadata={"help": "Verbose memory tracing"} )
    speed: bool = field(
        default=True , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
    memory: bool = field(
        default=True , metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        } , )
    trace_memory_line_by_line: bool = field(default=False , metadata={"help": "Trace memory line by line"} )
    save_to_csv: bool = field(default=False , metadata={"help": "Save result to a CSV file"} )
    log_print: bool = field(default=False , metadata={"help": "Save all print statements in a log file"} )
    env_print: bool = field(default=False , metadata={"help": "Whether to print environment information"} )
    multi_process: bool = field(
        default=True , metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        } , )
    inference_time_csv_file: str = field(
        default=F"""inference_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv."} , )
    inference_memory_csv_file: str = field(
        default=F"""inference_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
    train_time_csv_file: str = field(
        default=F"""train_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
    train_memory_csv_file: str = field(
        default=F"""train_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
    env_info_csv_file: str = field(
        default=F"""env_info_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving environment information."} , )
    log_filename: str = field(
        default=F"""log_{round(time() )}.csv""" , metadata={"help": "Log filename used if print statements are saved in log."} , )
    repeat: int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
    only_pretrain_model: bool = field(
        default=False , metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        } , )
    def __post_init__( self ):
        warnings.warn(
            f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models." ,FutureWarning ,)
    def to_json_string( self ):
        return json.dumps(dataclasses.asdict(self ) ,indent=2 )
    @property
    def model_names( self ):
        if len(self.models ) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`." )
        return self.models
    @property
    def do_multi_processing( self ):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU." )
            return False
        else:
            return True
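# A minimal usage sketch (illustrative, not part of the original file): this
# dataclass is designed to be populated via `HfArgumentParser`, e.g.
#   from transformers import HfArgumentParser
#   benchmark_args = HfArgumentParser(UpperCAmelCase).parse_args_into_dataclasses()[0]
#   print(benchmark_args.to_json_string())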
| 15
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    """simple docstring"""
    # Construct the model config, either the default one or from a json file
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
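    # Example invocation (hypothetical paths, shown for illustration only):
    #   python convert_gpt2_checkpoint_to_pytorch.py \
    #       --gpt2_checkpoint_path /tmp/gpt2_tf_ckpt \
    #       --pytorch_dump_folder_path /tmp/gpt2_pt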
| 91
| 0
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig ( datasets.BuilderConfig ):
    """simple docstring"""
    features = None
def _generate_iterable_examples(df , partition_order , ):
    """simple docstring"""
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*' ).where(f'part_id = {partition_id}' ).drop('part_id' )
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'{partition_id}_{row_id}', row.asDict()
                row_id += 1
    return generate_fn
class SparkExamplesIterable ( _BaseExamplesIterable ):
    """simple docstring"""
    def __init__( self , df , partition_order=None , ):
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
'''simple docstring'''
yield from self.generate_examples_fn()
    def shuffle_data_sources( self , generator ):
        '''simple docstring'''
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def shard_data_sources( self , worker_id , num_workers ):
        '''simple docstring'''
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    @property
    def n_shards( self ):
        '''simple docstring'''
        return len(self.partition_order )
class Spark ( datasets.DatasetBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__( self , df , cache_dir = None , working_dir = None , **config_kwargs , ):
        '''simple docstring'''
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )
    def _validate_cache_dir( self ):
        '''simple docstring'''
        def create_cache_and_write_probe(context ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , 'fs_test' + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , 'a' )
            return [probe_file]
        if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size ):
        '''simple docstring'''
        import pyspark
        def get_arrow_batch_size(it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , 'batch_bytes: long' )
            .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self , fpath , file_format , max_shard_size , ):
        '''simple docstring'''
        import pyspark
        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )
        stats = (
            self.df.mapInArrow(write_arrow , 'task_id: long, num_examples: long, num_bytes: long' )
            .groupBy('task_id' )
            .agg(
                pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator , file_format = "arrow" , max_shard_size = None , num_proc = None , **kwargs , ):
        '''simple docstring'''
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
        fname = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
        fpath = path_join(self._output_dir , fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(F'Renaming {total_shards} shards.' )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id , shard_id , global_shard_id , ):
                rename(
                    fs , fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , fpath.replace('TTTTT-SSSSS' , F'{global_shard_id:05d}' ).replace('NNNNN' , F'{total_shards:05d}' ) , )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args: _rename_shard(*args ) ).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , fpath.replace(SUFFIX , '' ) , )
    def _get_examples_iterable_for_split( self , split_generator , ):
        '''simple docstring'''
        return SparkExamplesIterable(self.df )
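# A minimal end-to-end sketch (illustrative, not part of the original file;
# assumes a running Spark session and the public `datasets` API that wraps
# this builder):
#   import datasets
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = datasets.Dataset.from_spark(df)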
| 367
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowercase (postfix_notation ):
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
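    # Illustrative check (not from the original file): "2 1 + 3 *" == (2 + 1) * 3
    assert lowercase(["2", "1", "+", "3", "*"] ) == 9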
| 25
| 0
|
import random
from typing import Any
def fisher_yates_shuffle( data ) -> Any:
    # NOTE: this is the naive random-transposition variant; the strict
    # Fisher-Yates / Knuth shuffle swaps index i only with a random j <= i.
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a] , data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
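    # For reference, a minimal unbiased Knuth/Fisher-Yates variant (illustrative,
    # not part of the original file):
    def knuth_shuffle(data: list) -> list:
        for i in range(len(data) - 1, 0, -1):
            j = random.randint(0, i)  # pick a partner from the not-yet-fixed prefix
            data[i], data[j] = data[j], data[i]
        return data
    print('Knuth Shuffle', knuth_shuffle(list(integers)))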
| 6
|
'''simple docstring'''
def a__ ( num ):
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if isinstance(num , str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
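    # Illustrative checks (not from the original file):
    assert a__(10 ) == "0b1010"
    assert a__(-10 ) == "-0b1010"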
| 267
| 0
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys( s_dict ):
    '''simple docstring'''
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_pattern = R""".*/layers_(\d+)"""
        new_key = key
        if re.match(layer_pattern , key ):
            new_key = re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , key )
        encoder_decoder_pattern = R"""(encoder|decoder)\/"""
        if re.match(encoder_decoder_pattern , key ):
            groups = re.match(encoder_decoder_pattern , key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"""/mlp/""" , R"""/1/mlp/""" , new_key )
                new_key = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R"""/mlp/""" , R"""/2/mlp/""" , new_key )
                new_key = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""] = s_dict[
            """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""] = s_dict[
            """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                new_name = key.replace("""expert/""" , f"""expert_{idx}/""" )
                s_dict[new_name] = expert_weights[idx]
                print(f"""{key} -> {new_name}""" )
            s_dict.pop(key )
    return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config( gin_file , num_experts ):
    '''simple docstring'''
    # Convert a google style config to the hugging face format
    import regex as re
    with open(gin_file , """r""" ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R"""(.*) = ([0-9.]*)""" , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if """.""" in value else int(value )
    activation = re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args["""num_experts"""] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_name , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    '''simple docstring'''
    # Initialise PyTorch model
    print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_name )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params["""target"""]
    flax_params = flatten_dict(flax_params , sep="""/""" )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep="""/""" )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
        help=(
            """Path to the T5X checkpoint to convert. \nIf no `gin_file` is provided, a `config_name` of a"""
            """ pretrained SwitchTransformers model has to be provided."""
        ),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
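    # Example invocation (hypothetical paths, for illustration only):
    #   python convert_switch_transformers_checkpoint.py \
    #       --switch_t5x_checkpoint_path /t5x/checkpoint_dir \
    #       --gin_file /t5x/config.gin \
    #       --pytorch_dump_folder_path /output_dir --num_experts 8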
| 165
|
def solution( limit = 1000000 ) -> int:
    '''simple docstring'''
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:  # i is prime, so adjust all of its multiples
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
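    # Illustrative check (not from the original file): phi(2) + ... + phi(8) = 21
    assert solution(8 ) == 21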
| 165
| 1
|
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker( seq: Iterable[str] , size: int ):
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input( dirty: str ):
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = ""
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table( key: str ):
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode( plaintext: str , key: str ):
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext , 2 ):
        row1 , col1 = divmod(table.index(char1 ) , 5 )
        row2 , col2 = divmod(table.index(char2 ) , 5 )
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode( ciphertext: str , key: str ):
    table = generate_table(key )
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext , 2 ):
        row1 , col1 = divmod(table.index(char1 ) , 5 )
        row2 , col2 = divmod(table.index(char2 ) , 5 )
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
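# A short illustrative round-trip (not part of the original file). Note that
# `decode` returns the *prepared* plaintext: upper-cased, J folded into I, and
# padding "X"s still in place.
if __name__ == "__main__":
    key = "playfair example"
    ciphertext = encode("Hide the gold in the tree stump" , key )
    print("encoded:" , ciphertext )
    print("decoded:" , decode(ciphertext , key ) )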
| 55
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_encoder: bool = field(default=False , metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments :
    """simple docstring"""
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
    max_source_length: Optional[int] = field(
        default=10_24 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    max_target_length: Optional[int] = field(
        default=1_28 , metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=1_42 , metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=1_42 , metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None , metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None , metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None , metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics( split , metrics , output_dir ):
    logger.info(F'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(F'''  {key} = {metrics[key]}''' )
    save_json(metrics , os.path.join(output_dir , F'''{split}_results.json''' ) )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
        logger.info("Training/evaluation parameters %s" , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(metric_key_prefix="val" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("val" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="test" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , "test_generations.txt" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , "all_results.json" ) )
    return all_metrics
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
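    # Example invocation (hypothetical paths/flags, for illustration only):
    #   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./cnn_dm \
    #       --output_dir ./out --do_train --do_eval --predict_with_generate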
| 55
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue :
    def __init__( self ):
        """simple docstring"""
        self.data = []
        self.head = 0
        self.tail = 0
    def is_empty( self ):
        """simple docstring"""
        return self.head == self.tail
    def push( self , data ):
        """simple docstring"""
        self.data.append(data )
        self.tail = self.tail + 1
    def pop( self ):
        """simple docstring"""
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count( self ):
        """simple docstring"""
        return self.tail - self.head
    def print_queue( self ):
        """simple docstring"""
        print(self.data )
        print('**************' )
        print(self.data[self.head : self.tail] )
class MyNode :
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
        self.left = None
        self.right = None
        self.height = 1
    def get_data( self ):
        """simple docstring"""
        return self.data
    def get_left( self ):
        """simple docstring"""
        return self.left
    def get_right( self ):
        """simple docstring"""
        return self.right
    def get_height( self ):
        """simple docstring"""
        return self.height
    def set_data( self , data ):
        """simple docstring"""
        self.data = data
    def set_left( self , node ):
        """simple docstring"""
        self.left = node
    def set_right( self , node ):
        """simple docstring"""
        self.right = node
    def set_height( self , height ):
        """simple docstring"""
        self.height = height
def get_height( node ) -> int:
    if node is None:
        return 0
    return node.get_height()
def my_max( a , b ) -> int:
    if a > b:
        return a
    return b
def right_rotation( node ) -> MyNode:
    print('left rotation node:' , node.get_data() )
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    height = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(height )
    height = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(height )
    return ret
def left_rotation( node ) -> MyNode:
    print('right rotation node:' , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    height = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(height )
    height = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(height )
    return ret
def lr_rotation( node ) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )
def rl_rotation( node ) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
def insert_node( node , data ) -> MyNode | None:
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right() , data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    height = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(height )
    return node
def get_right_most( root ) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most( root ) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node( root , data ) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child , temp_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('No such data' )
            return root
        else:
            root.set_left(del_node(left_child , data ) )
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data ) )
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    height = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
    root.set_height(height )
    return root
class AVLtree :
    def __init__( self ):
        """simple docstring"""
        self.root = None
    def get_height( self ):
        """simple docstring"""
        return get_height(self.root )
    def insert( self , data ):
        """simple docstring"""
        print('insert:' + str(data ) )
        self.root = insert_node(self.root , data )
    def del_node( self , data ):
        """simple docstring"""
        print('delete:' + str(data ) )
        if self.root is None:
            print('Tree is empty!' )
            return
        self.root = del_node(self.root , data )
    def __str__( self , ):  # a level traversal, gives a more intuitive look on the tree
        """simple docstring"""
        output = ''
        q = MyQueue()
        q.push(self.root )
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ' ' * int(math.pow(2 , layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(None )
                q.push(None )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            cnt = cnt + 1
            for i in range(1_00 ):
                if cnt == math.pow(2 , layer ) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 160
|
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n , prec=1_000 ) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
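    # Illustrative sanity checks (not from the original file):
    #   is_prime_big(97) -> True
    #   is_prime_big(91) -> False  (91 = 7 * 13)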
| 160
| 1
|
def is_palindrome( head ) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack( head ) -> bool:
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict( head ) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
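# A minimal driver sketch (illustrative; the original file does not define a
# node class, so a hypothetical singly-linked `ListNode` is assumed):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None
if __name__ == "__main__":
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(1)
    print(is_palindrome_stack(head))  # expected: True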
| 157
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits: int = 3 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if isinstance(number_of_qubits , str ):
        raise TypeError("number of qubits must be a integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10)." )
    qr = QuantumRegister(number_of_qubits , "qr" )
    cr = ClassicalRegister(number_of_qubits , "cr" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
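    # Note (illustrative): the circuit above applies the QFT to the all-zero
    # input state, which yields a uniform superposition, so the 10_000 shots
    # should be spread roughly evenly over all 2**3 = 8 bitstrings.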
| 340
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_albert"""] = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_albert_fast"""] = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_albert"""] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_albert"""] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_albert"""] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = KandinskyVaaInpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_a( self ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        '''simple docstring'''
        return 100
    @property
    def dummy_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        '''simple docstring'''
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint(self):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        """simple docstring"""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()
        output = pipeline(
            image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 25
| 1
|
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """simple docstring"""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    """simple docstring"""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}")
        return list(range(n_student))
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    """simple docstring"""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
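# Worked example (comments only; a sketch of how the maps above are used):
#   pick_layers_to_copy(n_student=3, n_teacher=12)      -> [0, 6, 11]
#   pick_layers_to_copy(n_student=4, n_teacher=16)      -> [0, 5, 10, 15]
#   get_layers_to_supervise(n_student=3, n_teacher=12)  -> [3, 7, 11]
# i.e. a 3-layer student distilled from a 12-layer teacher is initialized from teacher
# layers 0, 6 and 11, and its hidden states are matched against teacher layers 3, 7 and
# 11 during intermediate supervision.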
def _A ( lowercase , lowercase = "student" , lowercase = None , lowercase = None , lowercase=False , lowercase=None , lowercase=None , **lowercase , ):
"""simple docstring"""
a ='''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(lowercase , lowercase ):
AutoTokenizer.from_pretrained(lowercase ).save_pretrained(lowercase ) # purely for convenience
a =AutoModelForSeqaSeqLM.from_pretrained(lowercase ).eval()
else:
assert isinstance(lowercase , lowercase ), f'''teacher must be a model or string got type {type(lowercase )}'''
a =teacher.config.to_diff_dict()
try:
a , a =teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
a =teacher_e
if d is None:
a =teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
a , a =teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
a , a =teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
a =teacher_e
if d is None:
a =teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowercase )
# Copy weights
a =teacher.config_class(**lowercase )
a =AutoModelForSeqaSeqLM.from_config(lowercase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
a =student.load_state_dict(teacher.state_dict() , strict=lowercase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
a , a =list(range(lowercase ) ), list(range(lowercase ) )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
f''' {save_path}''' )
student.save_pretrained(lowercase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
a =pick_layers_to_copy(lowercase , lowercase )
if d_layers_to_copy is None:
a =pick_layers_to_copy(lowercase , lowercase )
try:
if hasattr(
lowercase , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowercase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowercase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowercase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowercase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowercase )
copy_layers(teacher.decoder.block , student.decoder.block , lowercase )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
a ={
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(lowercase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
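# Example CLI usage via `fire` (hypothetical teacher/paths; a sketch, not part of the
# original script): a 6-encoder / 3-decoder student from a Marian teacher:
#   python make_student.py Helsinki-NLP/opus-mt-en-ro student_dir --e 6 --d 3
# which saves the student model, its tokenizer and the copy metadata under `student_dir`.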
| 81
|
"""simple docstring"""
lowerCamelCase_ : int = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Union[str, Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 81
| 1
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """simple docstring"""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
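# Example (a sketch; assumes `small_ft.pkl` etc. sit next to this script, and the
# script filename is hypothetical):
#   python convert_dialogpt_checkpoint.py --dialogpt_path .
# For each size this renames `lm_head.decoder.weight` to `lm_head.weight` and writes
# `./DialoGPT-<size>/pytorch_model.bin` (WEIGHTS_NAME from `transformers.utils`).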
| 364
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """simple docstring"""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_to_move = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_to_move, excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
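# Worked example (a sketch): a root holding all three coins must move one coin to each
# empty child, so the minimum number of moves is 2.
#   root = TreeNode(3, TreeNode(0), TreeNode(0))
#   distribute_coins(root)  # -> 2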
| 93
| 0
|
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    """simple docstring"""
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
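# Example of a key flowing through the mapping above (derived from KEYS_TO_MODIFY_MAPPING):
#   "image_encoder.blocks.0.norm1.weight" -> "vision_encoder.layers.0.layer_norm1.weight"
# and for the hypernetwork MLPs, a key matching "...output_hypernetworks_mlps.N.layers.0..."
# has its "layers.0" segment renamed to "proj_in".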
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="ybelkada/segment-anything" ):
"""simple docstring"""
UpperCamelCase = hf_hub_download(_SCREAMING_SNAKE_CASE , F"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
UpperCamelCase = SamConfig()
elif "sam_vit_l" in model_name:
UpperCamelCase = SamVisionConfig(
hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
UpperCamelCase = SamConfig(
vision_config=_SCREAMING_SNAKE_CASE , )
elif "sam_vit_h" in model_name:
UpperCamelCase = SamVisionConfig(
hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
UpperCamelCase = SamConfig(
vision_config=_SCREAMING_SNAKE_CASE , )
UpperCamelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )
UpperCamelCase = replace_keys(_SCREAMING_SNAKE_CASE )
UpperCamelCase = SamImageProcessor()
UpperCamelCase = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase = SamModel(_SCREAMING_SNAKE_CASE )
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
UpperCamelCase = hf_model.to("cuda" )
UpperCamelCase = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
UpperCamelCase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert("RGB" )
UpperCamelCase = [[[400, 650]]]
UpperCamelCase = [[1]]
UpperCamelCase = processor(images=np.array(_SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCamelCase = hf_model(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
UpperCamelCase = processor(
images=np.array(_SCREAMING_SNAKE_CASE ) , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCamelCase = hf_model(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
UpperCamelCase = ((75, 275, 1_725, 850),)
UpperCamelCase = processor(images=np.array(_SCREAMING_SNAKE_CASE ) , input_boxes=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCamelCase = hf_model(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
UpperCamelCase = [[[400, 650], [800, 650]]]
UpperCamelCase = [[1, 1]]
UpperCamelCase = processor(
images=np.array(_SCREAMING_SNAKE_CASE ) , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
UpperCamelCase = hf_model(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
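# Example invocation (a sketch; the script filename is hypothetical, and a CUDA device
# is required since the model and inputs are moved to "cuda"):
#   python convert_sam_to_hf.py --model_name sam_vit_h_4b8939 --pytorch_dump_folder_path ./sam-vit-huge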
| 153
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """simple docstring"""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """simple docstring"""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
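# Shape sketch for checkpoint_version >= 2.0 (illustrative values): a fused QKV weight of
# size [num_heads * 3 * head_dim, hidden] is viewed as (num_heads, 3, head_dim, ...),
# transposed to (3, num_heads, head_dim, ...) and flattened back, so all query rows come
# first, then all key rows, then all value rows -- the layout GPT-2's `c_attn` expects.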
def convert_megatron_checkpoint(args, input_state_dict, config):
    """simple docstring"""
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match")
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions)
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    """simple docstring"""
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)")
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.")
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    ds_args = input_state_dict.get("args", None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50_257,
            n_positions=1_024,
            n_embd=1_024,
            n_layer=24,
            n_head=16,
            n_inner=4_096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50_256,
            eos_token_id=50_256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
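####################################################################################################
# Example invocation (hypothetical checkpoint path; a sketch):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       checkpoints/megatron_gpt2_345m_v0.0.zip
####################################################################################################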
| 153
| 1
|
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    """simple docstring"""

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    """simple docstring"""

    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
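# Sketch of the expected conversion (assumption: illustrative, inferred from the asserts
# above):
#   _convert_nargs_to_dict(["--do_train", "False", "--epochs", "3", "--max_steps", "50.5"])
#   -> {"do_train": False, "epochs": 3, "max_steps": 50.5}
# while `fail_training_script_args` raises because bare boolean flags are mixed with
# valued arguments, making the value attribution ambiguous.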
| 365
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
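# In LaTeX terms (a restatement of the loop above), with the cosine schedule
#   \bar\alpha(t) = \cos^2\big((t + 0.008)/1.008 \cdot \pi/2\big),
# each discrete beta is
#   \beta_i = \min\big(1 - \bar\alpha(\tfrac{i+1}{T}) / \bar\alpha(\tfrac{i}{T}),\ \beta_{\max}\big).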
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
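# Usage sketch (assumption: illustrative, with a hypothetical `denoiser` callable).
# A KDPM2-style scheduler consumes two model evaluations per output step, so
# `len(scheduler.timesteps)` is roughly twice `num_inference_steps`:
#
#   scheduler = KDPM2DiscreteScheduler()
#   scheduler.set_timesteps(25, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = denoiser(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample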
| 11
| 0
|
"""simple docstring"""
def multiplication_table(number: int, number_of_terms: int) -> str:
    """simple docstring"""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1))
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
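# Expected output of the call above (first and last lines, derived from the format string):
#   5 * 1 = 5
#   5 * 2 = 10
#   ...
#   5 * 10 = 50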
| 221
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowerCamelCase = logging.getLogger(__name__)
def load_and_quantize_model(model, bnb_quantization_config, weights_location=None, device_map=None, no_split_module_classes=None, max_memory=None, offload_folder=None, offload_state_dict=False):
"""simple docstring"""
A__ = bnb_quantization_config.load_in_abit
A__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
A__ = []
# custom device map
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(device_map.keys() ) > 1:
A__ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A__ = get_keys_to_not_convert(UpperCamelCase__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(UpperCamelCase__ )
A__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A__ = []
A__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(UpperCamelCase__ )
# compatibility with peft
A__ = load_in_abit
A__ = load_in_abit
A__ = get_parameter_device(UpperCamelCase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
A__ = replace_with_bnb_layers(UpperCamelCase__ , UpperCamelCase__ , modules_to_not_convert=UpperCamelCase__ )
# convert param to the right dtype
A__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A__ = name.replace('.weight' , '' ).replace('.bias' , '' )
A__ = getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(UpperCamelCase__ ):
param.to(UpperCamelCase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A__ = replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , modules_to_not_convert=UpperCamelCase__ )
A__ = get_quantized_model_device_map(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , max_memory=UpperCamelCase__ , no_split_module_classes=UpperCamelCase__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A__ = True
A__ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=UpperCamelCase__ , offload_state_dict=UpperCamelCase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(UpperCamelCase__ , device_map=UpperCamelCase__ , offload_dir=UpperCamelCase__ )
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A__ = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
A__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A__ = {}
A__ = special_dtypes
A__ = no_split_module_classes
A__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A__ = get_balanced_memory(
UpperCamelCase__ , low_zero=(device_map == 'balanced_low_0') , max_memory=UpperCamelCase__ , **UpperCamelCase__ , )
A__ = max_memory
A__ = infer_auto_device_map(UpperCamelCase__ , **UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# check if don't have any quantized module on the cpu
A__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
"""simple docstring"""
if modules_to_not_convert is None:
A__ = []
A__ , A__ = _replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
"""simple docstring"""
A__ = False
for name, module in model.named_children():
if current_key_name is None:
A__ = []
current_key_name.append(UpperCamelCase__ )
if isinstance(UpperCamelCase__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A__ = '.'.join(UpperCamelCase__ )
A__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=UpperCamelCase__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
A__ = module.weight.data
if module.bias is not None:
A__ = module.bias.data
bnb_module.requires_grad_(UpperCamelCase__ )
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ = True
if len(list(module.children() ) ) > 0:
A__ , A__ = _replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert(model):
"""simple docstring"""
with init_empty_weights():
A__ = deepcopy(UpperCamelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A__ = find_tied_parameters(UpperCamelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A__ = sum(UpperCamelCase__ , [] )
A__ = len(UpperCamelCase__ ) > 0
# Check if it is a base model
A__ = False
if hasattr(UpperCamelCase__ , 'base_model_prefix' ):
A__ = not hasattr(UpperCamelCase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A__ = list(model.named_children() )
A__ = [list_modules[-1][0]]
# add last module together with tied weights
A__ = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
A__ = list(set(UpperCamelCase__ ) ) + list(UpperCamelCase__ )
# remove ".weight" from the keys
A__ = ['.weight', '.bias']
A__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A__ = name.replace(UpperCamelCase__ , '' )
filtered_module_names.append(UpperCamelCase__ )
return filtered_module_names
def has_4bit_bnb_layers(model):
    """simple docstring"""
    # Check whether any `bnb.nn.Linear4bit` module is present in the model.
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    """simple docstring"""
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """simple docstring"""
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
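# Core idea of the layer replacement above in one line (a sketch, not the exact code
# path): each eligible `nn.Linear` is swapped for a bitsandbytes layer, e.g.
#   bnb.nn.Linear8bitLt(in_features, out_features, bias=True, has_fp16_weights=False, threshold=6.0)
# while reusing the original weight/bias tensors, so the actual quantization happens
# when the module is moved to the GPU.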
| 221
| 1
|
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """simple docstring"""
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}")
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder where the pruned model will be saved. Defaults to `bertarized_<model_name_or_path>` next to the input model.",
    )
    args = parser.parse_args()
main(args)
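# A self-contained sketch of the magnitude-pruning idea used above, in plain
# PyTorch (`MagnitudeBinarizer` is project-local to the emmental package; the
# helper below is an illustrative stand-in, where `remaining` is the fraction
# of weights kept, mirroring the --threshold semantics for magnitude pruning).
def magnitude_mask(tensor: torch.Tensor, remaining: float) -> torch.Tensor:
    # keep the `remaining` fraction of entries with the largest magnitude
    k_drop = int((1.0 - remaining) * tensor.numel())
    if k_drop == 0:
        return torch.ones_like(tensor)
    cutoff = tensor.abs().flatten().kthvalue(k_drop).values
    return (tensor.abs() > cutoff).to(tensor.dtype)


if __name__ == "__main__":
    w = torch.randn(8, 8)
    mask = magnitude_mask(w, remaining=0.25)  # prune ~75% of the entries
    print(f"density after pruning: {mask.mean().item():.2f}")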
| 291
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
a_ = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
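# A stripped-down illustration of the lazy-import pattern wired up above
# (hypothetical class; the real `_LazyModule` in transformers.utils also handles
# `__dir__`, pickling, and module specs). Attributes resolve to real imports
# only on first access.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + submodule, self.__name__)
        return getattr(module, attr)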
| 291
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
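# Illustrative usage of the configuration above (override values are arbitrary
# examples):
#
#     from transformers import VisualBertConfig
#
#     config = VisualBertConfig(visual_embedding_dim=1024, num_hidden_layers=6)
#     print(config.model_type, config.visual_embedding_dim)  # visual_bert 1024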
| 83
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)
# verify on image
SCREAMING_SNAKE_CASE__ : List[str] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
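# Added illustrative self-check (not part of the original conversion script):
# the `reverse_*` helpers above are exact inverses of their counterparts, since
# the [0, 2, 1, 3] permutation is its own inverse and the transposes cancel.
def _check_unfold_round_trip():
    x = torch.randn(8, 16)
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x)
    v = torch.randn(16)
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(v)), v)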
| 25
| 0
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Run a DFS from `vert`, returning vertices in order of completion."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS finish order on the graph, then DFS on the reverse."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
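# Example run on the sample graphs above: in test_graph_1 the cycle
# 0 -> 2 -> 1 -> 0 collapses into a single component.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))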
| 238
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 238
| 1
|
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative int found in `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
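# Example usage (the environment variable names are illustrative, not ones any
# library defines):
if __name__ == "__main__":
    os.environ["MY_APP_DEBUG"] = "yes"
    print(parse_flag_from_env("MY_APP_DEBUG"))      # True
    print(get_int_from_env(["MY_APP_WORKERS"], 4))  # 4 (unset, so the default wins)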
| 95
|
def solution(length: int = 50) -> int:
    """Project Euler 114: count the ways to fill a row of `length` units with red
    blocks of length >= 3, each pair of blocks separated by at least one black square."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 95
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            feature_extractor = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(feature_extractor, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        feature_extractor = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(feature_extractor, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 218
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER for the predictions and optionally log them to disk."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
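# Example invocation (the script name, model id and dataset id are illustrative):
# python eval.py --model_id facebook/wav2vec2-base-960h \
#     --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs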
| 218
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
__SCREAMING_SNAKE_CASE =["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """Fast NLLB tokenizer (backed by HuggingFace tokenizers), with language-code handling."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None,
        tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            # legacy: tokens + [eos, src_lang_code]
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            # non-legacy: [src_lang_code] + tokens + [eos]
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            # legacy: tokens + [eos, tgt_lang_code]
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            # non-legacy: [tgt_lang_code] + tokens + [eos]
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
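# Illustrative usage of the tokenizer above (the checkpoint id is the one from
# PRETRAINED_VOCAB_FILES_MAP; output ids depend on the vocabulary, so only the
# call pattern is shown):
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     # with the default (non-legacy) behaviour, input_ids start with the
#     # eng_Latn language code and end with </s>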
| 213
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Load the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
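# Quick illustration of the YAML round trip (field values are made up):
#
#     metadata = DatasetMetadata.from_yaml_string("license: mit\npretty_name: Demo")
#     print(metadata.to_yaml_string())  # license: mit / pretty_name: Demo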
| 213
| 1
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys(s_dict):
    # 1. rewrite t5x layer indices into the HF block/layer naming scheme
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                # split the stacked expert weights into one entry per expert
                # (per-expert key naming reconstructed from the HF layout)
                new_name = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_name] = expert_weights[idx]
                print(f"{key} -> {new_name}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise the PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
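# Example invocation (the paths and the script name are placeholders):
# python convert_switch_transformers_flax_to_pytorch.py \
#     --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --gin_file /path/to/model.gin \
#     --pytorch_dump_folder_path ./switch-base-8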
| 371
|
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
"""simple docstring"""
@classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-config' )
except HTTPError:
pass
    def test_push_to_hub(self):
'''simple docstring'''
        config = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('test-config' ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
# Reset repo
delete_repo(token=self._token ,repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
'''simple docstring'''
        config = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
'''simple docstring'''
CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} )
        new_config = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' )
self.assertEqual(new_config.attribute ,42 )
class ConfigTestUtils(unittest.TestCase):
"""simple docstring"""
    def test_config_from_string(self):
'''simple docstring'''
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"""
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
'''simple docstring'''
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f""" {", ".join(keys_with_defaults)}.""")
    def test_from_pretrained_subfolder(self):
        '''simple docstring'''
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        '''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check ensures we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        '''simple docstring'''
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        '''simple docstring'''
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        '''simple docstring'''
        repo = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    # Resolve a force given in polar form into its (x, y) components.
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    # A system is in static equilibrium when the sum of moments of all forces is (close to) zero.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
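# Quick illustrative check (hypothetical values, separate from the worked problems below):
# two equal and opposite 10 N forces applied at the same point produce no net moment about
# the origin, so the pair is in static equilibrium:
#     in_static_equilibrium(array([polar_force(10.0, 0), polar_force(10.0, 180)]),
#                           array([[0, 0], [0, 0]]))  # -> True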
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "yolos"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True,
        num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1,
        bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 12
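# Illustrative note (a sketch, not part of the original file): during ONNX export the config's
# `inputs` mapping is what marks the dynamic axes, so an export driver would consume it roughly as
#
#   onnx_config = YolosOnnxConfig(config)
#   dynamic_axes = dict(onnx_config.inputs)   # {"pixel_values": {0: "batch", ...}}
#
# while `atol_for_validation` bounds the allowed PyTorch-vs-ONNX output difference.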
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path) -> None:
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
        )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : int = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Dict = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Any = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        " and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def couloumbs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
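# Worked example (illustrative numbers): with charge1 = 3 C, charge2 = 5 C and distance = 2000 m,
# force = 8.988e9 * 15 / 2000**2 = 33705.0 N, so
#     couloumbs_law(force=0, charge1=3, charge2=5, distance=2000)  # -> {"force": 33705.0}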
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_CORRECT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
README_CORRECT_FOUR_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_EMPTY_YAML = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md: str, expected_dict: dict):
    """simple docstring"""
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md: str, expected_error: str):
    """simple docstring"""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md: str, expected_error: str):
    """simple docstring"""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md: str):
    """simple docstring"""
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md: str, expected_dict: dict):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md: str, expected_error: str):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md: str, expected_error: str):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md: str):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    # Read a file and return its contents as a string of bits.
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    # Replace the matched key with its two one-bit extensions, widening the ids when needed.
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    # Compress data_bits with a Lempel-Ziv style dictionary coder.
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
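# Quick trace (illustrative): compress_data("00") starts from lexicon {"0": "0", "1": "1"}.
# At i=0 the string "0" matches and emits "0"; add_key_to_lexicon then rewrites the lexicon to
# {"1": "01", "00": "00", "01": "10"}. At i=1 the pending "0" has no entry, so after the loop it
# is padded to "00", which matches and emits "00", giving "000" overall.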
def add_file_length(source_path: str, compressed: str) -> str:
    # Prefix the compressed stream with the source file's length in binary.
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    # Pack the bit string into bytes, marking the end with a "1" followed by zero padding.
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
        end_prompt='<!--End of the generated tip-->',
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
                ' to fix this.'
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    '''simple docstring'''

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    '''simple docstring'''
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    '''simple docstring'''
    return ".".join(str(v) for v in version_tuple)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12,
        num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False,
        special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''test''')
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''')
parser.add_argument(
'''--config_file''' , default=lowerCamelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''')
def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
'''simple docstring'''
def solution(n: int = 100) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
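# Sanity check against the problem statement's worked example (Project Euler 29):
# for 2 <= a, b <= 5 there are exactly 15 distinct terms, so solution(5) returns 15.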
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def get_file_format(self, seed, shape):
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy'
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image
    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = 'bf16' if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder='unet', dtype=dtype, revision=revision
        )
        return model, params
    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 10_00, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4', fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {'params': params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
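# A quick way to exercise a single parameterized case by hand (a sketch that reuses
# the first row of the table above and assumes the checkpoints can be downloaded in
# the current environment):
#
#   tests = FlaxUNet2DConditionModelIntegrationTests()
#   tests.test_compvis_sd_v1_4_flax_vs_torch_fp16(
#       83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]
#   )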
| 185
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
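# Example invocation (all paths below are illustrative placeholders):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert-pytorch/pytorch_model.bin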
| 96
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "EncoderDecoderConfig":
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
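# Minimal usage sketch (the BERT configs are an illustrative choice; any pair of
# PretrainedConfig instances works):
#
#   from transformers import BertConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention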
| 96
| 1
|
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self) -> Dataset:
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self) -> RagRetriever:
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool) -> RagRetriever:
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self) -> RagRetriever:
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
| 16
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 155
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "EncoderDecoderConfig":
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 350
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
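# Minimal usage sketch (the overridden value below is illustrative):
#
#   config = VisualBertConfig(visual_embedding_dim=1024)
#   assert config.model_type == "visual_bert"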
| 64
| 0
|
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case__ : str = torch.Size([1, 400] )
snake_case__ : Union[str, Any] = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case__ : List[str] = torch.Size([1, 174] )
snake_case__ : List[Any] = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case__ : Union[str, Any] = torch.Size([1, 1_408, 1_536] )
snake_case__ : int = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case__ : List[Any] = torch.Size([1, 1_408, 1_536] )
snake_case__ : List[Any] = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case__ : List[Any] = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case__ : Any = torch.Size([1, 1_408, 1_536] )
snake_case__ : Tuple = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case__ : List[Any] = torch.Size([1, 400] )
snake_case__ : int = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case__ : List[Any] = torch.Size([1, 400] )
snake_case__ : str = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case__ : Dict = torch.Size([1, 400] )
snake_case__ : List[Any] = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case__ : Tuple = torch.Size([1, 400] )
snake_case__ : str = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case__ : Any = torch.Size([1, 1_408, 1_536] )
snake_case__ : Optional[Any] = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case__ : Union[str, Any] = torch.Size([1, 174] )
snake_case__ : Optional[Any] = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case__ : int = torch.Size([1, 1_408, 1_536] )
snake_case__ : Dict = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case__ : Optional[Any] = torch.Size([1, 174] )
snake_case__ : Dict = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f"Model name not supported. Should be one of {model_names}" )
# verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 35
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # the membership tests below must use `in key`; the original `if "fc2" and ...`
        # short-circuited on a truthy literal and never checked the key itself
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
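# Sketch of the index this builds (shard file names and keys are illustrative):
#
#   {
#       "metadata": {"total_size": 123456789},
#       "weight_map": {"encoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00129.bin", ...}
#   }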
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 277
| 0
|
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law: P = nRT / V, with R = 0.0821 L·atm/(mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
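# Worked example (a sketch): 2 moles in a 5 L vessel at 300 K gives
# P = nRT / V = (2 * 0.0821 * 300) / 5 ≈ 9.85 atm, which the helper rounds to 10.
#
#   moles_to_pressure(volume=5, moles=2, temperature=300)  # -> 10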
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 61
| 0
|
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    """
    Reduce the set one step toward row-echelon form; recurses on the trailing minor.
    """
    # Divide each row by magnitude of first term --> creating 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve a system of n linear equations given as n lists of length n + 1.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
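# Expected results (hand-checked sketch): writing S = x1 + ... + x5, each equation
# above reads S + x_i = rhs_i, so 6S = 30, S = 5, and x_i = rhs_i - 5, giving
# [-1.0, 0.0, 1.0, 2.0, 3.0]; the second call reduces 4x = 2 to [0.5].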
| 22
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
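# Minimal usage sketch (tensor names and the score network are illustrative
# assumptions):
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(1000)
#   for t in scheduler.timesteps:
#       score = model(x, t)  # some score network
#       x, x_mean = scheduler.step_pred(score, x, t, generator=generator)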
| 5
| 0
|
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """
        Compute the winning vector by Euclidean distance
        """
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """
        Update the winning vector, pulling each component toward the sample
        """
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
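# Sanity check on the update rule (hand-computed sketch): with alpha = 0.5,
# pulling row [0.2, 0.6, 0.5, 0.9] toward sample [1, 1, 0, 0] moves each
# component halfway, giving [0.6, 0.8, 0.25, 0.45].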
| 367
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
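# Expected behaviour note: the recursion prints every subset of the input, i.e.
# 2**n lines; [3, 1, 2, 4] yields 16 subsequences (including the empty one).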
| 237
| 0
|
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
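# Usage sketch (the dataset name below is illustrative, not a real reference):
# `deduplicate_dataset` expects a `datasets.Dataset` whose rows carry "content",
# "repo_name" and "path" columns, as in the CodeParrot preprocessing pipeline.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("some-org/some-code-dataset", split="train")
#   ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)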
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Any = tmp_path / "cache"
_UpperCAmelCase : Dict = {"text": "string"}
_UpperCAmelCase : Union[str, Any] = TextDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , split=__lowerCAmelCase ).read()
_check_text_dataset(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = text_path
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = [text_path]
_UpperCAmelCase : List[Any] = tmp_path / "cache"
_UpperCAmelCase : Union[str, Any] = {"text": "string"}
_UpperCAmelCase : Optional[int] = TextDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_text_dataset(__lowerCAmelCase , __lowerCAmelCase )
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = tmp_path / "cache"
_UpperCAmelCase : Tuple = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase : Any = TextDatasetReader({"train": text_path} , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read()
_check_text_datasetdict(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if split:
_UpperCAmelCase : int = {split: text_path}
else:
_UpperCAmelCase : Tuple = "train"
_UpperCAmelCase : List[str] = {"train": text_path, "test": text_path}
_UpperCAmelCase : Optional[Any] = tmp_path / "cache"
_UpperCAmelCase : Optional[int] = {"text": "string"}
_UpperCAmelCase : int = TextDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_text_datasetdict(__lowerCAmelCase , __lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself, e.g. 5 -> 25, 76 -> 5776."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
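# Quick checks (0, 1, 5, 6, 25, 76 and 376 are automorphic):
#   is_automorphic_number(76)  -> True   (76**2 = 5776 ends in 76)
#   is_automorphic_number(7)   -> False  (7**2 = 49 does not end in 7)
#   is_automorphic_number(-1)  -> False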
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase__ : Dict = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase__,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
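# Usage note (a hedged reading of the datasets task API): this template is what
# `dataset.prepare_for_task("audio-classification")` resolves to. `align_with_features`
# first swaps the placeholder ClassLabel for the dataset's actual label feature, and
# the loader then renames columns according to `column_mapping`.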
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
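    # Sanity check for the 50-capacity case: the optimal 0/1 selection is the second
    # and third items (weights 20 + 30 = 50, values 100 + 120 = 220), which is where
    # the expected value of 220 comes from.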
if __name__ == "__main__":
unittest.main()
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)")
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
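# Illustrative usage (model name is only an example): this class backs the
# "feature-extraction" pipeline task, so it is normally built via the factory:
#
#   from transformers import pipeline
#
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("We are very happy.")  # nested list of floats, [1, seq_len, hidden_size]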
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player in a perfect binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
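# Worked example: with scores [3, 5, 2, 9] and height = 2, the root maximizer sees
# two minimizer subtrees, min(3, 5) = 3 and min(2, 9) = 2, so
# minimax(0, 0, True, [3, 5, 2, 9], 2) returns 3.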
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
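# A hedged note on how this fixture is consumed: SageMaker test classes declare a
# `framework` class attribute ("pytorch" or "tensorflow") and request `sm_env`,
# after which job settings are available to the tests as `self.env`.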
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Formats a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
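

# Illustrative sketch (not in the original module): how a caller would extend the
# user-agent string; the dict keys and values below are hypothetical examples.
def _example_http_user_agent():
    ua = http_user_agent({"file_type": "model", "framework": "pytorch"})
    # -> "diffusers/<version>; python/<version>; session_id/<hex>; ...; file_type/model; framework/pytorch"
    return ua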
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    """Returns `namespace/model_id`, resolving the namespace from the token when needed."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    """Renders and saves a README.md model card for a training run."""
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[]
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extracts the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
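

# Illustrative sketch (not in the original module): the hash is recovered from the
# "snapshots/<hash>/" segment of a cached path; the path below is a made-up example.
def _example_extract_commit_hash():
    path = "~/.cache/huggingface/hub/models--foo--bar/snapshots/abc123def456/model.bin"
    return extract_commit_hash(path)  # the hash if it matches REGEX_COMMIT_HASH, else None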
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    """Moves cached blobs from the pre-0.14 cache layout into the current one."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
"the directory exists and can be written to."
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Inserts `variant` before the file extension, e.g. "model.bin" -> "model.fp16.bin"."""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
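

# Illustrative check (not in the original module) of the variant naming scheme above:
def _example_add_variant():
    assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
    assert _add_variant("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"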
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """Resolves `weights_name` to a local file, downloading from the Hub when needed."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
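

# Illustrative sketch (not in the original file): wiring the trainer together. The
# datasets and post-processing function are placeholders for the objects built in the
# accompanying run_qa script.
def _example_build_trainer(model, training_args, train_dataset, eval_dataset, eval_examples, post_processing_function, compute_metrics):
    return QuestionAnsweringTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        eval_examples=eval_examples,
        post_process_function=post_processing_function,
        compute_metrics=compute_metrics,
    )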
from __future__ import annotations
import numpy as np
def relu(vector):
    """Applies the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
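

# Illustrative extra check (not in the original file): relu broadcasts over any ndarray shape.
def _example_relu_matrix():
    x = np.array([[-2.0, 3.0], [0.5, -0.1]])
    return relu(x)  # [[0.0, 3.0], [0.5, 0.0]]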
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText."
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models"
    )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored."
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name"
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set"
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner"
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration"
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
        eval_freq=100, igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
        recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
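
# Illustrative invocation (added for clarity; the paths shown are the defaults used above):
#   python run_clm_igf.py --data_dir ./data --model_name_or_path gpt2 \
#       --data_file data/tokenized_stories_train_wikitext103.jbl \
#       --igf_data_file igf_context_pairs.jbl --output_dir ./igf_out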
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
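

# Illustrative sketch (not in the original file): instantiating the config with the
# defaults above and a single override.
def _example_bert_generation_config():
    config = BertGenerationConfig(num_hidden_layers=12)
    return config.vocab_size, config.num_hidden_layers  # (50358, 12)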
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings for BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Returns the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
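

# Illustrative usage sketch (not part of the original file): round-tripping text and
# marking the first token for global attention, assuming the "allenai/led-base-16384"
# checkpoint referenced in the maps above is available.
def _example_led_tokenizer():
    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    enc = tokenizer("Summarize this document.", return_tensors="pt")
    global_attention_mask = [[1] + [0] * (enc["input_ids"].shape[1] - 1)]
    return enc, global_attention_mask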
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Takes a string as input and returns a list of (sub-word) token strings."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
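

# Illustrative usage sketch (not part of the original file), assuming the
# "google/bert_for_seq_generation_L-24_bbc_encoder" checkpoint referenced above.
def _example_bert_generation_tokenizer():
    tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    ids = tokenizer("Hello world").input_ids
    return tokenizer.decode(ids)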
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings for BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Returns the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
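

# Illustrative usage sketch (not part of the original file), assuming the
# "facebook/blenderbot-3B" checkpoint referenced in the maps above is available.
def _example_blenderbot_tokenizer():
    tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
    enc = tokenizer(" Hello, how are you?")
    # Blenderbot appends </s> rather than wrapping with <s> ... </s>:
    return tokenizer.convert_ids_to_tokens(enc.input_ids)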
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
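

# Illustrative usage sketch (not part of the original file); the checkpoint id below is
# a real unconditional DDPM checkpoint commonly used in examples.
def _example_ddim_inference():
    pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images
    return images[0]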
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and predictions."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        "Add task-specific arguments on top of the generic BaseTransformer ones."
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
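# Hedged usage note (the paths and flags below are hypothetical): a typical
# invocation of this script looks like
#
#     python run_ner.py --data_dir ./conll --labels ./conll/labels.txt \
#         --model_name_or_path bert-base-cased --output_dir ./out --do_train --do_predict
#
# The authoritative flag set comes from add_generic_args and BaseTransformer.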
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set node storing data, parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure (union-find)
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set containing `data`
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the representative of the set `data` belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for the union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbouring nodes (with edge weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm to generate a Minimum Spanning Tree (MST) of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
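if __name__ == "__main__":
    # Small demonstration of the Kruskal MST above on a 4-node weighted graph.
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)
    g.add_edge(3, 4, 1)
    mst = g.kruskal()
    # The MST keeps edges (1, 2), (3, 4) and (2, 3), total weight 4.
    print(mst.connections)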
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
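# Hedged usage sketch (run from within transformers, where the relative imports
# above resolve):
#
#     config = Mask2FormerConfig()   # instantiates the default Swin backbone
#     d = config.to_dict()
#     print(d["model_type"], d["backbone_config"]["model_type"])  # mask2former swin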
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    # Removes segments. Positive values shave the first segments, negative shave the last segments.
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    # Updates paths inside resnets to the new naming scheme (local renaming)
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
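# Illustration (computed by hand): renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"])
# returns [{"old": "input_blocks.1.0.in_layers.0.weight",
#           "new": "input_blocks.1.0.norm1.weight"}]; the block prefix itself is
# remapped later via additional_replacements in assign_to_checkpoint below.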
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    # Updates paths inside attentions to the new naming scheme (local renaming)
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
A =argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
A =parser.parse_args()
A =torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A =json.loads(f.read())
A =convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A =UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A =DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
A =VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
A =LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
'''simple docstring'''
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce the fraction by the gcd, found with Euclid's algorithm
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction("67") = }""")
print(f"""{decimal_to_fraction("45.0") = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction("6.25") = }""")
print(f"""{decimal_to_fraction("78td") = }""")
def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
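# Hedged usage sketch (network access assumed; checkpoint id from the map above):
#
#     tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer(["A long document.", "Short."])
#     enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#     padded = tokenizer.pad(enc, padding="longest")
#     # the shorter global_attention_mask row is extended with -1 by _pad above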
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : int = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
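# Illustration (traced by hand through the rules above):
#     rename_key("pretrained.model.blocks.0.attn.proj.weight")
#     -> "dpt.encoder.layer.0.attention.output.dense.weight"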
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
'''simple docstring'''
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
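# Example: for grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest
# top-left -> bottom-right path is 1 + 3 + 1 + 1 + 1 = 7, so min_path_sum(grid) == 7.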
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            doc_ids, retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            doc_ids, retrieved_doc_embeds = torch.tensor(doc_ids), torch.tensor(retrieved_doc_embeds)
            scatter_ids = self._chunk_tensor(doc_ids, n_queries)
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
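# Note on the flow above: retrieve() gathers the query vectors from every worker
# onto the main worker, performs the actual index lookup there once, and then
# scatters the resulting doc ids / embeddings back; _scattered() is the receiving
# end of that scatter on each worker.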
"""simple docstring"""
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """https://openweathermap.org/api"""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """https://openweathermap.org/forecast5"""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """https://openweathermap.org/api/one-call-api"""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts `sequence[start..end]` in place (multiply-and-surrender strategy)."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
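# Example: slowsort sorts in place:
#     data = [5, 2, 4, 1]
#     slowsort(data)   # data is now [1, 2, 4, 5]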
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
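# Usage sketch (added for illustration; assumes the "allegro/herbert-base-cased"
# checkpoint is reachable, so it is kept commented out):
# tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# ids = tokenizer("Kot siedzi na macie.")["input_ids"]
# build_inputs_with_special_tokens above frames a single sequence as <s> ... </s>.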
| 368
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 81
| 0
|
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f'{result:.9f}'


if __name__ == "__main__":
    print(solution(20))
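# Reasoning sketch (added comment, not in the original solution): by linearity
# of expectation, E[#colours drawn] = NUM_COLOURS * P(a fixed colour appears)
# = 7 * (1 - C(60, 20) / C(70, 20)), which is exactly what solution() computes.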
| 19
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 78
| 0
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
if __name__ == "__main__":
print(F"{solution() = }")
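# Worked check (added for illustration): the expansions are 3/2, 7/5, 17/12,
# 41/29, 99/70, 239/169, 577/408, 1393/985, ...; 1393/985 is the first whose
# numerator has more digits than its denominator, so solution(8) == 1.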
| 370
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 144
| 0
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
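# Usage sketch (added for illustration; the checkpoint name below is an
# assumption on our part, so the snippet is kept commented out):
# from transformers import OwlViTProcessor
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a cat", "a dog"]], images=image, return_tensors="pt")
# Per the padding logic above, each batch item is padded to the maximum number
# of text queries in the batch before tokenization.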
| 307
|
import os
def solution() -> str:
    """simple docstring"""
    file_path = os.path.join(os.path.dirname(__file__), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 307
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''clip_embeddings_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''cross_attention_dim''': self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model
    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''sample_size''': 32,
            # RGB in channels
            '''in_channels''': 3,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 6,
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            '''layers_per_block''': 1,
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': '''identity''',
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type='''learned_range''', prediction_type='''epsilon''', num_train_timesteps=1000,
        )
        super_res_scheduler = UnCLIPScheduler(
            variance_type='''fixed_small_log''', prediction_type='''epsilon''', num_train_timesteps=1000,
        )
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = '''cpu'''

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
        device = '''cpu'''

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        device = '''cpu'''

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs['''image'''] = [
            pipeline_inputs['''image'''],
            pipeline_inputs['''image'''],
        ]
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs['''image'''] = [
            tuple_pipeline_inputs['''image'''],
            tuple_pipeline_inputs['''image'''],
        ]
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device('''cpu''')

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop('''image''')
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == '''cpu'''
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff)

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            '''decoder_num_inference_steps''',
            '''super_res_num_inference_steps''',
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            '''decoder_num_inference_steps''',
            '''super_res_num_inference_steps''',
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs)

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''')

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            '''kakaobrain/karlo-v1-alpha-image-variations''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='''cpu''').manual_seed(0)
        output = pipeline(
            input_image, generator=generator, output_type='''np''',
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
| 86
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86
| 1
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 273
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(F'''{num}/{den}''')
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
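# Worked example (added): the four non-trivial curious fractions are 16/64,
# 19/95, 26/65 and 49/98; their product is 1/100, so solution() returns 100.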
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wav2vec2'] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_wav2vec2'] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_wav2vec2'] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 355
|
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'''loss: {j} \t''')  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print('''theta: ''', theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta))  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='''black''')
    plt.legend()
    plt.show()
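# Note (added for clarity): the training loop above is plain batch gradient
# descent on the log-loss; in matrix form one step is
#     theta <- theta - alpha * x.T @ (sigmoid(x @ theta) - y) / m
# which is exactly what np.dot(x.T, h - y) / y.size implements.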
| 333
| 0
|
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''tf_padding'''))
        self.parent.assertTrue(hasattr(config, '''depth_multiplier'''))
class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': MobileNetV2Model,
            '''image-classification''': MobileNetV2ForImageClassification,
            '''image-segmentation''': MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_attention_outputs = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='''MobileNetV2 does not output attentions''')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''')
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 235
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """simple docstring"""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'

    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())

        for i in range(0, n):
            denominations.append(int(input(F'Denomination {i}: ').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()

    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(F'Following is minimal change for {value}: ')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
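# Worked example (added, illustrative):
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "188")
# -> [100, 50, 20, 10, 5, 2, 1]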
| 32
| 0
|
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = """Hello World!"""
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
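# Round-trip check (added, illustrative): b85encode/b85decode are inverses on
# UTF-8 text, so decoding an encoded string recovers the original.
if __name__ == "__main__":
    assert base85_decode(base85_encode("round trip")) == "round trip"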
| 102
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
    def test_small_model_tf(self):
"""simple docstring"""
pass
@slow
@require_torch
    def test_large_model_pt(self):
        """simple docstring"""
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 102
| 1
|
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test(unittest.TestCase):
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 220
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
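# NOTE (editor sketch, not from the original test file): a minimal, standalone
# illustration of how a merges list like the one written in setUp() drives BPE
# segmentation -- greedily apply the highest-priority adjacent pair merge until
# none applies. CTRL renders non-final pieces with a trailing "@@".
def bpe_segment(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # characters, end-of-word marked
    ranks = {pair: i for i, pair in enumerate(merges)}
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no learned merge applies any more
        i = pairs.index(best)
        symbols[i : i + 2] = [best[0] + best[1]]
    return symbols


if __name__ == "__main__":
    merges = [("a", "p"), ("ap", "t</w>"), ("r", "e"), ("a", "d"), ("ad", "apt</w>")]
    print(bpe_segment("adapt", merges))  # ['adapt</w>'] -> token "adapt"
    print(bpe_segment("react", merges))  # ['re', 'a', 'c', 't</w>'] -> "re@@ a@@ c@@ t"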
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of ``function`` in [a, b] via Bolzano's theorem."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the endpoints is already a root
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither endpoint is a root and both values share a sign,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                # the sign change is between start and mid, so shrink from the right
                end = mid
            else:
                # the sign change is between mid and end, so shrink from the left
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
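# NOTE (editor sketch, not from the original file): a quick sanity check for
# the `bisection` routine above, run in the same module. cos(x) - x has a
# single root (the Dottie number, ~0.739085) on [0, 1].
if __name__ == "__main__":
    import math

    root = bisection(lambda x: math.cos(x) - x, 0, 1)
    print(round(root, 6))  # 0.739085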
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
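# NOTE (editor sketch, not from the original test file): the same benchmark can
# be driven outside unittest (requires TensorFlow installed); model id and
# sizes below are illustrative.
if __name__ == "__main__":
    args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8, 32],
        batch_sizes=[1, 2],
        multi_process=False,  # keep everything in-process for easier debugging
    )
    print(TensorFlowBenchmark(args).run())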
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are given in order of degree, from the constant term upward."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
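# NOTE (editor sketch, not from the original file): a quick demonstration of
# the class above; coefficients run from the constant term upward.
if __name__ == "__main__":
    p = Polynomial(2, [5, -2, 1])  # x^2 - 2x + 5
    q = Polynomial(1, [0, 1])  # x
    print(p + q)  # 1x^2 - 1x + 5
    print(p * q)  # 1x^3 - 2x^2 + 5x
    print(p.evaluate(3))  # 9 - 6 + 5 = 8
    print(p.derivative())  # 2x - 2
    print(p.derivative().integral(5) == p)  # True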
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])